Diffstat (limited to 'drivers/net/ethernet')
573 files changed, 28151 insertions, 14284 deletions
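Nearly every hunk below applies the same mechanical conversion: instead of writing into netdev->dev_addr directly (via memcpy(), ether_addr_copy(), or byte-by-byte loops), drivers now go through eth_hw_addr_set(), and the MAC-fetching helpers are swapped for their netdev-aware variants (of_get_ethdev_address(), device_get_ethdev_address(), platform_get_ethdev_address()); many u8 * parameters become const u8 * at the same time. As a minimal sketch of the pattern only — foo_set_mac_address() and its surrounding driver are invented for illustration, not taken from this diff — a converted .ndo_set_mac_address callback ends up looking roughly like this:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical example of the conversion; not a driver from this series. */
static int foo_set_mac_address(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Previously: memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
         * dev_addr is treated as read-only and is only written through the
         * core helper.
         */
        eth_hw_addr_set(ndev, addr->sa_data);

        /* ...program the new address into the hardware from ndev->dev_addr... */
        return 0;
}

The of_get_mac_address(np, ndev->dev_addr) to of_get_ethdev_address(np, ndev) changes in the probe paths below are the equivalent conversion for addresses coming from device tree or firmware.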
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 87c906e744fb..846fa3af4504 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -270,7 +270,7 @@ static void el3_dev_fill(struct net_device *dev, __be16 *phys_addr, int ioaddr, { struct el3_private *lp = netdev_priv(dev); - memcpy(dev->dev_addr, phys_addr, ETH_ALEN); + eth_hw_addr_set(dev, (u8 *)phys_addr); dev->base_addr = ioaddr; dev->irq = irq; dev->if_port = if_port; diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 6f0ea2facea9..1d124b0f65e7 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -567,6 +567,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, { struct corkscrew_private *vp = netdev_priv(dev); unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ + __be16 addr[ETH_ALEN / 2]; int i; int irq; @@ -619,7 +620,6 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, /* Read the station address from the EEPROM. */ EL3WINDOW(0); for (i = 0; i < 0x18; i++) { - __be16 *phys_addr = (__be16 *) dev->dev_addr; int timer; outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd); /* Pause for at least 162 us. for the read to take place. */ @@ -631,8 +631,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, eeprom[i] = inw(ioaddr + Wn0EepromData); checksum ^= eeprom[i]; if (i < 3) - phys_addr[i] = htons(eeprom[i]); + addr[i] = htons(eeprom[i]); } + eth_hw_addr_set(dev, (u8 *)addr); checksum = (checksum ^ (checksum >> 8)) & 0xff; if (checksum != 0x00) pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index dd4d3c48b98d..dc3b7c960611 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -305,15 +305,13 @@ static int tc574_config(struct pcmcia_device *link) struct net_device *dev = link->priv; struct el3_private *lp = netdev_priv(dev); int ret, i, j; + __be16 addr[ETH_ALEN / 2]; unsigned int ioaddr; - __be16 *phys_addr; char *cardname; __u32 config; u8 *buf; size_t len; - phys_addr = (__be16 *)dev->dev_addr; - dev_dbg(&link->dev, "3c574_config()\n"); link->io_lines = 16; @@ -347,19 +345,20 @@ static int tc574_config(struct pcmcia_device *link) len = pcmcia_get_tuple(link, 0x88, &buf); if (buf && len >= 6) { for (i = 0; i < 3; i++) - phys_addr[i] = htons(le16_to_cpu(buf[i * 2])); + addr[i] = htons(le16_to_cpu(buf[i * 2])); kfree(buf); } else { kfree(buf); /* 0 < len < 6 */ EL3WINDOW(0); for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); - if (phys_addr[0] == htons(0x6060)) { + addr[i] = htons(read_eeprom(ioaddr, i + 10)); + if (addr[0] == htons(0x6060)) { pr_notice("IO port conflict at 0x%03lx-0x%03lx\n", dev->base_addr, dev->base_addr+15); goto failed; } } + eth_hw_addr_set(dev, (u8 *)addr); if (link->prod_id[1]) cardname = link->prod_id[1]; else diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c index 09816e84314d..4673bc1604e7 100644 --- a/drivers/net/ethernet/3com/3c589_cs.c +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -237,8 +237,8 @@ static void tc589_detach(struct pcmcia_device *link) static int tc589_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; - __be16 *phys_addr; int ret, i, j, multi = 0, fifo; + __be16 addr[ETH_ALEN / 2]; unsigned int ioaddr; static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; u8 *buf; 
@@ -246,7 +246,6 @@ static int tc589_config(struct pcmcia_device *link) dev_dbg(&link->dev, "3c589_config\n"); - phys_addr = (__be16 *)dev->dev_addr; /* Is this a 3c562? */ if (link->manf_id != MANFID_3COM) dev_info(&link->dev, "hmmm, is this really a 3Com card??\n"); @@ -285,18 +284,19 @@ static int tc589_config(struct pcmcia_device *link) len = pcmcia_get_tuple(link, 0x88, &buf); if (buf && len >= 6) { for (i = 0; i < 3; i++) - phys_addr[i] = htons(le16_to_cpu(buf[i*2])); + addr[i] = htons(le16_to_cpu(buf[i*2])); kfree(buf); } else { kfree(buf); /* 0 < len < 6 */ for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i)); - if (phys_addr[0] == htons(0x6060)) { + addr[i] = htons(read_eeprom(ioaddr, i)); + if (addr[0] == htons(0x6060)) { dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n", dev->base_addr, dev->base_addr+15); goto failed; } } + eth_hw_addr_set(dev, (u8 *)addr); /* The address and resource configuration register aren't loaded from * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 7b0ae9efc004..ccf07667aa5e 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1091,6 +1091,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, struct vortex_private *vp; int option; unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ + __be16 addr[ETH_ALEN / 2]; int i, step; struct net_device *dev; static int printed_version; @@ -1284,7 +1285,8 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); for (i = 0; i < 3; i++) - ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); + addr[i] = htons(eeprom[i + 10]); + eth_hw_addr_set(dev, (u8 *)addr); if (print_info) pr_cont(" %pM", dev->dev_addr); /* Unfortunately an all zero eeprom passes the checksum and this diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c index da1ae37a9d73..991ad953aa79 100644 --- a/drivers/net/ethernet/8390/apne.c +++ b/drivers/net/ethernet/8390/apne.c @@ -320,8 +320,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); if (i) return i; - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = SA_prom[i]; + eth_hw_addr_set(dev, SA_prom); pr_cont(" %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 6c6bdd5913ec..1f8acbba5b6b 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -716,7 +716,7 @@ static int ax_init_dev(struct net_device *dev) for (i = 0; i < 16; i++) SA_prom[i] = SA_prom[i+i]; - memcpy(dev->dev_addr, SA_prom, ETH_ALEN); + eth_hw_addr_set(dev, SA_prom); } #ifdef CONFIG_AX88796_93CX6 @@ -733,7 +733,7 @@ static int ax_init_dev(struct net_device *dev) (__le16 __force *)mac_addr, sizeof(mac_addr) >> 1); - memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, mac_addr); } #endif if (ax->plat->wordlength == 2) { @@ -748,16 +748,18 @@ static int ax_init_dev(struct net_device *dev) /* load the mac-address from the device */ if (ax->plat->flags & AXFLG_MAC_FROMDEV) { + u8 addr[ETH_ALEN]; + ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ei_local->mem + E8390_CMD); /* 0x61 */ for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = - ei_inb(ioaddr + 
EN1_PHYS_SHIFT(i)); + addr[i] = ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); + eth_hw_addr_set(dev, addr); } if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) && ax->plat->mac_addr) - memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, ax->plat->mac_addr); if (!is_valid_ether_addr(dev->dev_addr)) { eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 3c370e686ec3..3aef959fc25b 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -187,6 +187,7 @@ static int get_prom(struct pcmcia_device *link) { struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; + u8 addr[ETH_ALEN]; int i, j; /* This is based on drivers/net/ethernet/8390/ne.c */ @@ -220,9 +221,11 @@ static int get_prom(struct pcmcia_device *link) for (i = 0; i < 6; i += 2) { j = inw(ioaddr + AXNET_DATAPORT); - dev->dev_addr[i] = j & 0xff; - dev->dev_addr[i+1] = j >> 8; + addr[i] = j & 0xff; + addr[i+1] = j >> 8; } + eth_hw_addr_set(dev, addr); + return 1; } /* get_prom */ diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c index 4ad8031ab669..e320cccba61a 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -374,8 +374,7 @@ static int mcf8390_init(struct net_device *dev) if (ret) return ret; - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = SA_prom[i]; + eth_hw_addr_set(dev, SA_prom); netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 9afc712f5948..0a9118b8be0c 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -500,9 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) dev->base_addr = ioaddr; - for (i = 0; i < ETH_ALEN; i++) { - dev->dev_addr[i] = SA_prom[i]; - } + eth_hw_addr_set(dev, SA_prom); pr_cont("%pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index d6715008e04d..6a0a2039600a 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -390,7 +390,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, dev->ethtool_ops = &ne2k_pci_ethtool_ops; NS8390_init(dev, 0); - memcpy(dev->dev_addr, SA_prom, dev->addr_len); + eth_hw_addr_set(dev, SA_prom); i = register_netdev(dev); if (i) diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 96ad72abd373..0f07fe03da98 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -278,6 +278,7 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link) { struct net_device *dev = link->priv; u_char __iomem *base, *virt; + u8 addr[ETH_ALEN]; int i, j; /* Allocate a small memory window */ @@ -302,7 +303,8 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link) (readb(base+2) == hw_info[i].a1) && (readb(base+4) == hw_info[i].a2)) { for (j = 0; j < 6; j++) - dev->dev_addr[j] = readb(base + (j<<1)); + addr[j] = readb(base + (j<<1)); + eth_hw_addr_set(dev, addr); break; } } @@ -324,6 +326,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link) { struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; + u8 addr[ETH_ALEN]; u_char prom[32]; int i, j; @@ -362,7 +365,8 @@ static struct hw_info *get_prom(struct pcmcia_device *link) } if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) { for (j = 0; j 
< 6; j++) - dev->dev_addr[j] = prom[j<<1]; + addr[j] = prom[j<<1]; + eth_hw_addr_set(dev, addr); return (i < NR_INFO) ? hw_info+i : &default_info; } return NULL; @@ -377,6 +381,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link) static struct hw_info *get_dl10019(struct pcmcia_device *link) { struct net_device *dev = link->priv; + u8 addr[ETH_ALEN]; int i; u_char sum; @@ -385,7 +390,8 @@ static struct hw_info *get_dl10019(struct pcmcia_device *link) if (sum != 0xff) return NULL; for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i); + addr[i] = inb_p(dev->base_addr + 0x14 + i); + eth_hw_addr_set(dev, addr); i = inb(dev->base_addr + 0x1f); return ((i == 0x91)||(i == 0x99)) ? &dl10022_info : &dl10019_info; } @@ -400,6 +406,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link) { struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; + u8 addr[ETH_ALEN]; int i, j; /* Not much of a test, but the alternatives are messy */ @@ -413,9 +420,10 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link) for (i = 0; i < 6; i += 2) { j = inw(ioaddr + PCNET_DATAPORT); - dev->dev_addr[i] = j & 0xff; - dev->dev_addr[i+1] = j >> 8; + addr[i] = j & 0xff; + addr[i+1] = j >> 8; } + eth_hw_addr_set(dev, addr); return NULL; } @@ -430,6 +438,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link) static struct hw_info *get_hwired(struct pcmcia_device *link) { struct net_device *dev = link->priv; + u8 addr[ETH_ALEN]; int i; for (i = 0; i < 6; i++) @@ -438,7 +447,8 @@ static struct hw_info *get_hwired(struct pcmcia_device *link) return NULL; for (i = 0; i < 6; i++) - dev->dev_addr[i] = hw_addr[i]; + addr[i] = hw_addr[i]; + eth_hw_addr_set(dev, addr); return &default_info; } /* get_hwired */ diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c index fbbd7f22c142..bd89ca8a92df 100644 --- a/drivers/net/ethernet/8390/stnic.c +++ b/drivers/net/ethernet/8390/stnic.c @@ -104,8 +104,8 @@ STNIC_WRITE (int reg, byte val) static int __init stnic_probe(void) { struct net_device *dev; - int i, err; struct ei_device *ei_local; + int err; /* If we are not running on a SolutionEngine, give up now */ if (! MACH_SE) @@ -119,8 +119,7 @@ static int __init stnic_probe(void) #ifdef CONFIG_SH_STANDARD_BIOS sh_bios_get_node_addr (stnic_eadr); #endif - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = stnic_eadr[i]; + eth_hw_addr_set(dev, stnic_eadr); /* Set the base address to point to the NIC, not the "real" base! 
*/ dev->base_addr = 0x1000; diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c index 35a500a21521..e8b4fe813a08 100644 --- a/drivers/net/ethernet/8390/zorro8390.c +++ b/drivers/net/ethernet/8390/zorro8390.c @@ -364,8 +364,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board, if (i) return i; - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = SA_prom[i]; + eth_hw_addr_set(dev, SA_prom); pr_debug("Found ethernet address: %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c index c4ecf4fcadf8..1cfdd01b4c2e 100644 --- a/drivers/net/ethernet/actions/owl-emac.c +++ b/drivers/net/ethernet/actions/owl-emac.c @@ -342,7 +342,7 @@ static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv) static void owl_emac_set_hw_mac_addr(struct net_device *netdev) { struct owl_emac_priv *priv = netdev_priv(netdev); - u8 *mac_addr = netdev->dev_addr; + const u8 *mac_addr = netdev->dev_addr; u32 addr_high, addr_low; addr_high = mac_addr[0] << 8 | mac_addr[1]; @@ -1173,7 +1173,7 @@ static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr) if (netif_running(netdev)) return -EBUSY; - memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, skaddr->sa_data); owl_emac_set_hw_mac_addr(netdev); return owl_emac_setup_frame_xmit(netdev_priv(netdev)); @@ -1385,7 +1385,7 @@ static void owl_emac_get_mac_addr(struct net_device *netdev) struct device *dev = netdev->dev.parent; int ret; - ret = eth_platform_get_mac_address(dev, netdev->dev_addr); + ret = platform_get_ethdev_address(dev, netdev); if (!ret && is_valid_ether_addr(netdev->dev_addr)) return; diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index e0f6cc910bd2..16b6b83f670b 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -955,7 +955,7 @@ static int netdev_open(struct net_device *dev) writew(0, ioaddr + PerfFilterTable + 4); writew(0, ioaddr + PerfFilterTable + 8); for (i = 1; i < 16; i++) { - __be16 *eaddrs = (__be16 *)dev->dev_addr; + const __be16 *eaddrs = (const __be16 *)dev->dev_addr; void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16; writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4; writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4; @@ -1787,14 +1787,14 @@ static void set_rx_mode(struct net_device *dev) } else if (netdev_mc_count(dev) <= 14) { /* Use the 16 element perfect filter, skip first two entries. */ void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; - __be16 *eaddrs; + const __be16 *eaddrs; netdev_for_each_mc_addr(ha, dev) { eaddrs = (__be16 *) ha->addr; writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; } - eaddrs = (__be16 *)dev->dev_addr; + eaddrs = (const __be16 *)dev->dev_addr; i = netdev_mc_count(dev) + 2; while (i++ < 16) { writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; @@ -1805,7 +1805,7 @@ static void set_rx_mode(struct net_device *dev) } else { /* Must use a multicast hash table. 
*/ void __iomem *filter_addr; - __be16 *eaddrs; + const __be16 *eaddrs; __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ memset(mc_filter, 0, sizeof(mc_filter)); @@ -1819,7 +1819,7 @@ static void set_rx_mode(struct net_device *dev) } /* Clear the perfect filter list, skip first two entries. */ filter_addr = ioaddr + PerfFilterTable + 2 * 16; - eaddrs = (__be16 *)dev->dev_addr; + eaddrs = (const __be16 *)dev->dev_addr; for (i = 2; i < 16; i++) { writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index c560ad06f0be..cc34eaf0491f 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1025,7 +1025,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]); GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 | dev->dev_addr[4] << 8 | dev->dev_addr[5]); diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 920633161174..f4edc616388c 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3863,7 +3863,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu) et131x_init_send(adapter); et131x_hwaddr_init(adapter); - ether_addr_copy(netdev->dev_addr, adapter->addr); + eth_hw_addr_set(netdev, adapter->addr); /* Init the device with the new settings */ et131x_adapter_setup(adapter); @@ -3966,7 +3966,7 @@ static int et131x_pci_setup(struct pci_dev *pdev, netif_napi_add(netdev, &adapter->napi, et131x_poll, 64); - ether_addr_copy(netdev->dev_addr, adapter->addr); + eth_hw_addr_set(netdev, adapter->addr); rc = -ENOMEM; diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 696517eae77f..1fc9a1cd3ef8 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1008,7 +1008,7 @@ static void slic_set_link_autoneg(struct slic_device *sdev) static void slic_set_mac_address(struct slic_device *sdev) { - u8 *addr = sdev->netdev->dev_addr; + const u8 *addr = sdev->netdev->dev_addr; u32 val; val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24; @@ -1660,7 +1660,7 @@ static int slic_read_eeprom(struct slic_device *sdev) goto free_eeprom; } /* set mac address */ - ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]); + eth_hw_addr_set(sdev->netdev, mac[devfn]); free_eeprom: dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 037baea1c738..800ee022388f 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -356,7 +356,7 @@ static int emac_set_mac_address(struct net_device *dev, void *p) if (netif_running(dev)) return -EBUSY; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 | dev-> dev_addr[2], db->membase + EMAC_MAC_A1_REG); @@ -852,7 +852,7 @@ static int emac_probe(struct platform_device *pdev) } /* Read MAC-address from DT */ - ret = 
of_get_mac_address(np, ndev->dev_addr); + ret = of_get_ethdev_address(np, ndev); if (ret) { /* if the MAC address is invalid get a random one */ eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 9dc12b13061f..eeb86bd851f9 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -2712,15 +2712,15 @@ static int ace_set_mac_addr(struct net_device *dev, void *p) struct ace_private *ap = netdev_priv(dev); struct ace_regs __iomem *regs = ap->regs; struct sockaddr *addr=p; - u8 *da; + const u8 *da; struct cmd cmd; if(netif_running(dev)) return -EBUSY; - memcpy(dev->dev_addr, addr->sa_data,dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); - da = (u8 *)dev->dev_addr; + da = (const u8 *)dev->dev_addr; writel(da[0] << 8 | da[1], &regs->MacAddrHi); writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5], diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 1c00d719e5d7..d75d95a97dd9 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -849,7 +849,7 @@ static int init_phy(struct net_device *dev) return 0; } -static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) +static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr) { u32 msb; u32 lsb; @@ -1524,7 +1524,7 @@ static int altera_tse_probe(struct platform_device *pdev) priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE; /* get default MAC address from device tree */ - ret = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr); + ret = of_get_ethdev_address(pdev->dev.of_node, ndev); if (ret) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 0e43000614ab..7d5d885d85d5 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -4073,7 +4073,7 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter, ether_addr_copy(adapter->mac_addr, netdev->dev_addr); } else { ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); - ether_addr_copy(netdev->dev_addr, adapter->mac_addr); + eth_hw_addr_set(netdev, adapter->mac_addr); } /* Set offload features */ diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 4786f0504691..899c8a2a34b6 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -168,7 +168,7 @@ config SUNLANCE config AMD_XGBE tristate "AMD 10GbE Ethernet driver" - depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM + depends on (OF_ADDRESS || ACPI || PCI) && HAS_IOMEM depends on X86 || ARM64 || COMPILE_TEST depends on PTP_1588_CLOCK_OPTIONAL select BITREVERSE diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 92e4246dc359..0b493467b042 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1500,7 +1500,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p) int i; struct sockaddr *addr = p; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); spin_lock_irq(&lp->lock); /* Setting the MAC address to the device */ for (i = 0; i < ETH_ALEN; i++) diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 9d2f49fd945e..9c7d9690d00c 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++
b/drivers/net/ethernet/amd/atarilance.c @@ -582,7 +582,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, switch( lp->cardtype ) { case OLD_RIEBL: /* No ethernet address! (Set some default address) */ - memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN); + eth_hw_addr_set(dev, OldRieblDefHwaddr); break; case NEW_RIEBL: lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN); @@ -1123,7 +1123,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr ) return -EIO; } - memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len ); + eth_hw_addr_set(dev, saddr->sa_data); for( i = 0; i < 6; i++ ) MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */ lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 ); diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 9c1636222b99..c6f003975621 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1178,7 +1178,7 @@ static int au1000_probe(struct platform_device *pdev) aup->phy1_search_mac0 = 1; } else { if (is_valid_ether_addr(pd->mac)) { - memcpy(dev->dev_addr, pd->mac, ETH_ALEN); + eth_hw_addr_set(dev, pd->mac); } else { /* Set a random MAC since no valid provided by platform_data. */ eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index 4019cab87505..30ee5329bd7c 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -529,7 +529,8 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg, mace_init Resets the MACE chip. ---------------------------------------------------------------------------- */ -static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr) +static int mace_init(mace_private *lp, unsigned int ioaddr, + const char *enet_addr) { int i; int ct = 0; @@ -635,7 +636,7 @@ static int nmclan_config(struct pcmcia_device *link) kfree(buf); goto failed; } - memcpy(dev->dev_addr, buf, ETH_ALEN); + eth_hw_addr_set(dev, buf); kfree(buf); /* Verify configuration by reading the MACE ID. 
*/ diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 70d76fdb9f56..820baa2604ac 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1775,7 +1775,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) pr_cont(" warning: CSR address invalid,\n"); pr_info(" using instead PROM address of"); } - memcpy(dev->dev_addr, promaddr, ETH_ALEN); + eth_hw_addr_set(dev, promaddr); } } diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 4a845bc071b2..007bd7787291 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -305,7 +305,6 @@ static int __init lance_probe( struct net_device *dev) unsigned long ioaddr; struct lance_private *lp; - int i; static int did_version; volatile unsigned short *ioaddr_probe; unsigned short tmp1, tmp2; @@ -373,8 +372,7 @@ static int __init lance_probe( struct net_device *dev) dev->irq); /* copy in the ethernet address from the prom */ - for(i = 0; i < 6 ; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; + eth_hw_addr_set(dev, idprom->id_ethaddr); /* tell the card it's ether address, bytes swapped */ MEM->init.hwaddr[0] = dev->dev_addr[1]; diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index ddece276ae23..22d609563af8 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1301,7 +1301,6 @@ static int sparc_lance_probe_one(struct platform_device *op, struct device_node *dp = op->dev.of_node; struct lance_private *lp; struct net_device *dev; - int i; dev = alloc_etherdev(sizeof(struct lance_private) + 8); if (!dev) @@ -1315,8 +1314,7 @@ static int sparc_lance_probe_one(struct platform_device *op, * will copy the address in the device structure to the lance * initialization block. 
*/ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; + eth_hw_addr_set(dev, idprom->id_ethaddr); /* Get the IO region */ lp->lregs = of_ioremap(&op->resource[0], 0, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index d5fd49dd25f3..3936543a74d8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1080,7 +1080,7 @@ static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) return 0; } -static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr) +static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr) { unsigned int mac_addr_hi, mac_addr_lo; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 17a585adfb49..30d24d19f40d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1912,10 +1912,8 @@ static int xgbe_close(struct net_device *netdev) clk_disable_unprepare(pdata->ptpclk); clk_disable_unprepare(pdata->sysclk); - flush_workqueue(pdata->an_workqueue); destroy_workqueue(pdata->an_workqueue); - flush_workqueue(pdata->dev_workqueue); destroy_workqueue(pdata->dev_workqueue); set_bit(XGBE_DOWN, &pdata->dev_state); @@ -2016,7 +2014,7 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr) if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, saddr->sa_data); hw_if->set_mac_address(pdata, netdev->dev_addr); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index a218dc6f2edd..0e8698928e4d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -267,7 +267,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) netdev->irq = pdata->dev_irq; netdev->base_addr = (unsigned long)pdata->xgmac_regs; - memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, pdata->mac_addr); /* Initialize ECC timestamps */ pdata->tx_sec_period = jiffies; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 3305979a9f7c..607a2c90513b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -729,7 +729,7 @@ struct xgbe_ext_stats { struct xgbe_hw_if { int (*tx_complete)(struct xgbe_ring_desc *); - int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr); + int (*set_mac_address)(struct xgbe_prv_data *, const u8 *addr); int (*config_rx_mode)(struct xgbe_prv_data *); int (*enable_rx_csum)(struct xgbe_prv_data *); diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c index 2da979e4fad1..6423e22e05b2 100644 --- a/drivers/net/ethernet/apm/xgene-v2/mac.c +++ b/drivers/net/ethernet/apm/xgene-v2/mac.c @@ -65,7 +65,7 @@ void xge_mac_set_speed(struct xge_pdata *pdata) void xge_mac_set_station_addr(struct xge_pdata *pdata) { - u8 *dev_addr = pdata->ndev->dev_addr; + const u8 *dev_addr = pdata->ndev->dev_addr; u32 addr0, addr1; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 80399c8980bd..d022b6db9e06 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -36,7 +36,7 @@ static int xge_get_resources(struct xge_pdata *pdata) return -ENOMEM; } - if 
(!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN)) + if (device_get_ethdev_address(dev, ndev)) eth_hw_addr_random(ndev); memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 5f657879134e..e641dbbea1e2 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -378,8 +378,8 @@ u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr) static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata) { + const u8 *dev_addr = pdata->ndev->dev_addr; u32 addr0, addr1; - u8 *dev_addr = pdata->ndev->dev_addr; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 5f1fc6582d74..220dc42af31a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1731,7 +1731,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) xgene_get_port_id_acpi(dev, pdata); #endif - if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN)) + if (device_get_ethdev_address(dev, ndev)) eth_hw_addr_random(ndev); memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index f482ced2cadd..72b5e8eb0ec7 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -165,8 +165,8 @@ static void xgene_sgmac_reset(struct xgene_enet_pdata *p) static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p) { + const u8 *dev_addr = p->ndev->dev_addr; u32 addr0, addr1; - u8 *dev_addr = p->ndev->dev_addr; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 304b5d43f236..86607b79c09f 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -207,8 +207,8 @@ static void xgene_pcs_reset(struct xgene_enet_pdata *pdata) static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata) { + const u8 *dev_addr = pdata->ndev->dev_addr; u32 addr0, addr1; - u8 *dev_addr = pdata->ndev->dev_addr; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index a989d2df59ad..9a650d1c1bdd 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -308,7 +308,7 @@ bmac_init_registers(struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); volatile unsigned short regValue; - unsigned short *pWord16; + const unsigned short *pWord16; int i; /* XXDEBUG(("bmac: enter init_registers\n")); */ @@ -371,7 +371,7 @@ bmac_init_registers(struct net_device *dev) bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ - pWord16 = (unsigned short *)dev->dev_addr; + pWord16 = (const unsigned short *)dev->dev_addr; bmwrite(dev, MADD0, *pWord16++); bmwrite(dev, MADD1, *pWord16++); bmwrite(dev, MADD2, *pWord16); @@ -521,19 +521,16 @@ static int bmac_resume(struct macio_dev *mdev) static int bmac_set_address(struct net_device *dev, void *addr) 
{ struct bmac_data *bp = netdev_priv(dev); - unsigned char *p = addr; - unsigned short *pWord16; + const unsigned short *pWord16; unsigned long flags; - int i; XXDEBUG(("bmac: enter set_address\n")); spin_lock_irqsave(&bp->lock, flags); - for (i = 0; i < 6; ++i) { - dev->dev_addr[i] = p[i]; - } + eth_hw_addr_set(dev, addr); + /* load up the hardware address */ - pWord16 = (unsigned short *)dev->dev_addr; + pWord16 = (const unsigned short *)dev->dev_addr; bmwrite(dev, MADD0, *pWord16++); bmwrite(dev, MADD1, *pWord16++); bmwrite(dev, MADD2, *pWord16); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index bed481816ea3..062a300a566a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -217,7 +217,7 @@ struct aq_hw_ops { int (*hw_ring_tx_head_update)(struct aq_hw_s *self, struct aq_ring_s *aq_ring); - int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr); + int (*hw_set_mac_address)(struct aq_hw_s *self, const u8 *mac_addr); int (*hw_soft_reset)(struct aq_hw_s *self); @@ -226,7 +226,7 @@ struct aq_hw_ops { int (*hw_reset)(struct aq_hw_s *self); - int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr); + int (*hw_init)(struct aq_hw_s *self, const u8 *mac_addr); int (*hw_start)(struct aq_hw_s *self); @@ -373,7 +373,7 @@ struct aq_fw_ops { int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable); int (*set_power)(struct aq_hw_s *self, unsigned int power_state, - u8 *mac); + const u8 *mac); int (*send_fw_request)(struct aq_hw_s *self, const struct hw_fw_request_iface *fw_req, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c index 4a6dfac857ca..02058fe79f52 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c @@ -35,7 +35,7 @@ static int aq_apply_macsec_cfg(struct aq_nic_s *nic); static int aq_apply_secy_cfg(struct aq_nic_s *nic, const struct macsec_secy *secy); -static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac) +static void aq_ether_addr_to_mac(u32 mac[2], const unsigned char *emac) { u32 tmp[2] = { 0 }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 6c049864dac0..694aa70bcafe 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -332,7 +332,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self) { static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT; - ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent); + eth_hw_addr_set(self->ndev, mac_addr_permanent); } #endif diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 611875ef2cd1..4625ccb79499 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -322,7 +322,7 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) return aq_hw_err_from_flags(self); } -static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr) { unsigned int h = 0U; unsigned int l = 0U; @@ -348,7 +348,7 @@ err_exit: return err; } -static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr) +static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr) { static u32 
aq_hw_atl_igcr_table_[4][2] = { [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 9f1b15077e7d..d875ce3ec759 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -533,7 +533,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) return aq_hw_err_from_flags(self); } -int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr) { unsigned int h = 0U; unsigned int l = 0U; @@ -558,7 +558,7 @@ err_exit: return err; } -static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr) +static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr) { static u32 aq_hw_atl_igcr_table_[4][2] = { [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h index d8db972113ec..5298846dd9f7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -58,7 +58,7 @@ int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring); void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self); -int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr); +int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr); int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc); int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 404cbf60d3f2..fc0e66006644 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -944,7 +944,7 @@ u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self) } static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled, - u8 *mac) + const u8 *mac) { struct hw_atl_utils_fw_rpc *prpc = NULL; unsigned int rpc_size = 0U; @@ -987,7 +987,7 @@ err_exit: } static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state, - u8 *mac) + const u8 *mac) { struct hw_atl_utils_fw_rpc *prpc = NULL; unsigned int rpc_size = 0U; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index ee0c22d04935..eac631c45c56 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -358,7 +358,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp) return 0; } -static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac) +static int aq_fw2x_set_wol(struct aq_hw_s *self, const u8 *mac) { struct hw_atl_utils_fw_rpc *rpc = NULL; struct offload_info *info = NULL; @@ -404,7 +404,7 @@ err_exit: } static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state, - u8 *mac) + const u8 *mac) { int err = 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index 92f64048bf69..c98708bb044c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -516,7 +516,7 @@ 
static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self) return aq_hw_err_from_flags(self); } -static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr) +static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr) { static u32 aq_hw_atl2_igcr_table_[4][2] = { [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 92a79c4ffa2c..0a67612af228 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -26,7 +26,7 @@ config ARC_EMAC_CORE config ARC_EMAC tristate "ARC EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ && OF_NET + depends on OF_IRQ depends on ARC || COMPILE_TEST help On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x @@ -36,7 +36,7 @@ config ARC_EMAC config EMAC_ROCKCHIP tristate "Rockchip EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ && OF_NET && REGULATOR + depends on OF_IRQ && REGULATOR depends on ARCH_ROCKCHIP || COMPILE_TEST help Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers. diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 38c288ec9059..c642c3d3e600 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -773,7 +773,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); arc_emac_set_address_internal(ndev); @@ -941,7 +941,7 @@ int arc_emac_probe(struct net_device *ndev, int interface) } /* Get MAC address from device tree */ - err = of_get_mac_address(dev->of_node, ndev->dev_addr); + err = of_get_ethdev_address(dev->of_node, ndev); if (err) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 54cdafdd067d..9acf589b1178 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -151,10 +151,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv) data->reset_gpio = devm_gpiod_get_optional(priv->dev, "phy-reset", GPIOD_OUT_LOW); if (IS_ERR(data->reset_gpio)) { - error = PTR_ERR(data->reset_gpio); - dev_err(priv->dev, "Failed to request gpio: %d\n", error); mdiobus_free(bus); - return error; + return dev_err_probe(priv->dev, PTR_ERR(data->reset_gpio), + "Failed to request gpio\n"); } of_property_read_u32(np, "phy-reset-duration", &data->msec); @@ -166,9 +165,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv) error = of_mdiobus_register(bus, priv->dev->of_node); if (error) { - dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name); mdiobus_free(bus); - return error; + return dev_err_probe(priv->dev, error, + "cannot register MDIO bus %s\n", bus->name); } return 0; diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 02ae98aabf91..ada3a9f0c8c8 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1968,10 +1968,10 @@ static int ag71xx_probe(struct platform_device *pdev) ag->stop_desc->ctrl = 0; ag->stop_desc->next = (u32)ag->stop_desc_dma; - err = of_get_mac_address(np, ndev->dev_addr); + err = of_get_ethdev_address(np, ndev); if (err) { netif_err(ag, probe, ndev, "invalid MAC address, using random address\n"); - eth_random_addr(ndev->dev_addr); + eth_hw_addr_random(ndev); } err = of_get_phy_mode(np, &ag->phy_if_mode); diff --git 
a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 4ea157efca86..4ad3fc72e74e 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -607,7 +607,7 @@ static int alx_set_mac_address(struct net_device *netdev, void *data) if (netdev->addr_assign_type & NET_ADDR_RANDOM) netdev->addr_assign_type ^= NET_ADDR_RANDOM; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); alx_set_macaddr(hw, hw->mac_addr); @@ -1832,7 +1832,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN); - memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN); + eth_hw_addr_set(netdev, hw->mac_addr); memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN); hw->mdio.prtad = 0; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 3b51b172b317..da595242bc13 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -482,7 +482,7 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p) if (netif_running(netdev)) return -EBUSY; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); @@ -1847,7 +1847,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue, buffer_info->skb = NULL; buffer_info->length = 0; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); - netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed"); + netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed"); break; } buffer_info->dma = mapping; @@ -2662,10 +2662,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * The atl1c chip can DMA to 64-bit addresses, but it uses a single @@ -2769,7 +2767,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* got a random MAC address, set NET_ADDR_RANDOM to netdev */ netdev->addr_assign_type = NET_ADDR_RANDOM; } - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac_addr); if (netif_msg_probe(adapter)) dev_dbg(&pdev->dev, "mac address : %pM\n", adapter->hw.mac_addr); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 753973ac922e..56e5f440e666 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -374,7 +374,7 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p) if (netif_running(netdev)) return -EBUSY; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atl1e_hw_set_mac_addr(&adapter->hw); @@ -2297,10 +2297,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int err = 0; err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * The atl1e chip can DMA to 64-bit addresses, but it uses a single @@ -2392,7 +2390,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac_addr); netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr); INIT_WORK(&adapter->reset_task, atl1e_reset_task); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 68f6c0bbd945..b4c9e805e981 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -3027,7 +3027,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* mark random mac */ netdev->addr_assign_type = NET_ADDR_RANDOM; } - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac_addr); if (!is_valid_ether_addr(netdev->dev_addr)) { err = -EIO; diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index b69298ddb647..bbc4d7b08a49 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -931,7 +931,7 @@ static int atl2_set_mac(struct net_device *netdev, void *p) if (netif_running(netdev)) return -EBUSY; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atl2_set_mac_addr(&adapter->hw); @@ -1405,7 +1405,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* copy the MAC address out of the EEPROM */ atl2_read_mac_addr(&adapter->hw); - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac_addr); if (!is_valid_ether_addr(netdev->dev_addr)) { err = -EIO; goto err_eeprom; diff 
--git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c index 0941d07d0833..e8cfbf4ff1b5 100644 --- a/drivers/net/ethernet/atheros/atlx/atlx.c +++ b/drivers/net/ethernet/atheros/atlx/atlx.c @@ -69,7 +69,7 @@ static int atlx_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atlx_set_mac_addr(&adapter->hw); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index fa784953c601..969591bbc066 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -218,7 +218,8 @@ static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index data[1] = (val >> 0) & 0xFF; } -static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) +static inline void __b44_cam_write(struct b44 *bp, + const unsigned char *data, int index) { u32 val; @@ -1200,7 +1201,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp); if (!bp->rx_ring) { - /* Allocation may have failed due to pci_alloc_consistent + /* Allocation may have failed due to dma_alloc_coherent insisting on use of GFP_DMA, which is more restrictive than necessary... */ struct dma_desc *rx_ring; @@ -1383,7 +1384,7 @@ static int b44_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); spin_lock_irq(&bp->lock); @@ -1507,7 +1508,8 @@ static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset) } } -static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) +static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask, + int offset) { int magicsync = 6; int k, j, len = offset; @@ -2171,7 +2173,7 @@ static int b44_get_invariants(struct b44 *bp) * valid PHY address. 
*/ bp->phy_addr &= 0x1F; - memcpy(bp->dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(bp->dev, addr); if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ pr_err("Invalid MAC address found in EEPROM\n"); diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 02a569500234..7cc5213c575a 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -170,7 +170,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet, goto err_free_buf_descs; } - ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL); + ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL); if (!ring->slots) goto err_free_buf_descs; @@ -715,7 +715,7 @@ static int bcm4908_enet_probe(struct platform_device *pdev) return err; SET_NETDEV_DEV(netdev, &pdev->dev); - err = of_get_mac_address(dev->of_node, netdev->dev_addr); + err = of_get_ethdev_address(dev->of_node, netdev); if (err) eth_hw_addr_random(netdev); netdev->netdev_ops = &bcm4908_enet_netdev_ops; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index d56886300ecf..a568994a03a6 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -670,7 +670,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p) u32 val; priv = netdev_priv(dev); - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); /* use perfect match register 0 to store my mac address */ val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | @@ -1762,7 +1762,7 @@ static int bcm_enet_probe(struct platform_device *pdev) pd = dev_get_platdata(&pdev->dev); if (pd) { - memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, pd->mac_addr); priv->has_phy = pd->has_phy; priv->phy_id = pd->phy_id; priv->has_phy_interrupt = pd->has_phy_interrupt; @@ -2665,7 +2665,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev) pd = dev_get_platdata(&pdev->dev); if (pd) { - memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, pd->mac_addr); memcpy(priv->used_ports, pd->used_ports, sizeof(pd->used_ports)); priv->num_ports = pd->num_ports; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 7fa1b695400d..40933bf5a710 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1818,7 +1818,7 @@ static inline void umac_reset(struct bcm_sysport_priv *priv) } static void umac_set_hw_addr(struct bcm_sysport_priv *priv, - unsigned char *addr) + const unsigned char *addr) { u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; @@ -1850,7 +1850,7 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); /* interface is disabled, changes to MAC will be reflected on next * open call @@ -2555,7 +2555,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) } /* Initialize netdevice members */ - ret = of_get_mac_address(dn, dev->dev_addr); + ret = of_get_ethdev_address(dn, dev); if (ret) { dev_warn(&pdev->dev, "using random Ethernet MAC\n"); eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 6ce80cbcb48e..086739e4f40a 
100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -10,6 +10,7 @@ #include <linux/bcma/bcma.h> #include <linux/brcmphy.h> +#include <linux/of_mdio.h> #include "bgmac.h" static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, @@ -211,6 +212,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) { struct bcma_device *core = bgmac->bcma.core; struct mii_bus *mii_bus; + struct device_node *np; int err; mii_bus = mdiobus_alloc(); @@ -229,7 +231,9 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) mii_bus->parent = &core->dev; mii_bus->phy_mask = ~(1 << bgmac->phyaddr); - err = mdiobus_register(mii_bus); + np = of_get_child_by_name(core->dev.of_node, "mdio"); + + err = of_mdiobus_register(mii_bus, np); if (err) { dev_err(&core->dev, "Registration of mii bus failed\n"); goto err_free_bus; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 9513cfb5ba58..e6f48786949c 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -11,6 +11,7 @@ #include <linux/bcma/bcma.h> #include <linux/brcmphy.h> #include <linux/etherdevice.h> +#include <linux/of_mdio.h> #include <linux/of_net.h> #include "bgmac.h" @@ -86,17 +87,28 @@ static int bcma_phy_connect(struct bgmac *bgmac) struct phy_device *phy_dev; char bus_id[MII_BUS_ID_SIZE + 3]; + /* DT info should be the most accurate */ + phy_dev = of_phy_get_and_connect(bgmac->net_dev, bgmac->dev->of_node, + bgmac_adjust_link); + if (phy_dev) + return 0; + /* Connect to the PHY */ - snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id, - bgmac->phyaddr); - phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link, - PHY_INTERFACE_MODE_MII); - if (IS_ERR(phy_dev)) { - dev_err(bgmac->dev, "PHY connection failed\n"); - return PTR_ERR(phy_dev); + if (bgmac->mii_bus && bgmac->phyaddr != BGMAC_PHY_NOREGS) { + snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id, + bgmac->phyaddr); + phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(phy_dev)) { + dev_err(bgmac->dev, "PHY connection failed\n"); + return PTR_ERR(phy_dev); + } + + return 0; } - return 0; + /* Assume a fixed link to the switch port */ + return bgmac_phy_connect_direct(bgmac); } static const struct bcma_device_id bgmac_bcma_tbl[] = { @@ -128,7 +140,7 @@ static int bgmac_probe(struct bcma_device *core) bcma_set_drvdata(core, bgmac); - err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr); + err = of_get_ethdev_address(bgmac->dev->of_node, bgmac->net_dev); if (err == -EPROBE_DEFER) return err; @@ -150,7 +162,7 @@ static int bgmac_probe(struct bcma_device *core) err = -ENOTSUPP; goto err; } - ether_addr_copy(bgmac->net_dev->dev_addr, mac); + eth_hw_addr_set(bgmac->net_dev, mac); } /* On BCM4706 we need common core to access PHY */ @@ -297,10 +309,7 @@ static int bgmac_probe(struct bcma_device *core) bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset; bgmac->get_bus_clock = bcma_bgmac_get_bus_clock; bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32; - if (bgmac->mii_bus) - bgmac->phy_connect = bcma_phy_connect; - else - bgmac->phy_connect = bgmac_phy_connect_direct; + bgmac->phy_connect = bcma_phy_connect; err = bgmac_enet_probe(bgmac); if (err) diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index df8ff839cc62..c6412c523637 100644 --- 
a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -191,7 +191,7 @@ static int bgmac_probe(struct platform_device *pdev) bgmac->dev = &pdev->dev; bgmac->dma_dev = &pdev->dev; - ret = of_get_mac_address(np, bgmac->net_dev->dev_addr); + ret = of_get_ethdev_address(np, bgmac->net_dev); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index fe4d99abd548..7b525c65bacb 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -768,7 +768,7 @@ static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set, udelay(2); } -static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr) +static void bgmac_write_mac_address(struct bgmac *bgmac, const u8 *addr) { u32 tmp; @@ -1241,7 +1241,7 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) if (ret < 0) return ret; - ether_addr_copy(net_dev->dev_addr, sa->sa_data); + eth_hw_addr_set(net_dev, sa->sa_data); bgmac_write_mac_address(bgmac, net_dev->dev_addr); eth_commit_mac_addr_change(net_dev, addr); diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 8c83973adca5..babc955ba64e 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -2704,7 +2704,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp) } static void -bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos) +bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos) { u32 val; @@ -7910,7 +7910,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); if (netif_running(dev)) bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); @@ -8574,7 +8574,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (is_kdump_kernel()) bnx2_wait_dma_complete(bp); - memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, bp->mac_addr); dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e789430f407c..2b06d78baa08 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1994,7 +1994,7 @@ int bnx2x_idle_chk(struct bnx2x *bp); * operation has been successfully scheduled and a negative - if a requested * operations has failed. 
*/ -int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, +int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac, struct bnx2x_vlan_mac_obj *obj, bool set, int mac_type, unsigned long *ramrod_flags); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b5d954cb409a..e8e8c2d593c5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4336,7 +4336,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) return rc; } - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); if (netif_running(dev)) rc = bnx2x_set_eth_mac(bp, true); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index ae87296ae1ff..27e712178f95 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8417,7 +8417,7 @@ alloc_mem_err: * Init service functions */ -int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, +int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac, struct bnx2x_vlan_mac_obj *obj, bool set, int mac_type, unsigned long *ramrod_flags) { @@ -9146,7 +9146,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) else if (bp->wol) { u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; - u8 *mac_addr = bp->dev->dev_addr; + const u8 *mac_addr = bp->dev->dev_addr; struct pci_dev *pdev = bp->pdev; u32 val; u16 pmc; @@ -11790,7 +11790,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) * as the SAN mac was copied from the primary MAC. */ if (IS_MF_FCOE_AFEX(bp)) - memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); + eth_hw_addr_set(bp->dev, fip_mac); } else { val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 
iscsi_mac_upper); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 6fbf735fca31..74a8931ce1d1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3058,7 +3058,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID && !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) { /* update new mac to net device */ - memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN); + eth_hw_addr_set(bp->dev, bulletin->mac); } if (bulletin->valid_bitmap & (1 << LINK_VALID)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 966d5722c5e2..8c2cf5519787 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -508,7 +508,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp); void bnx2x_vfpf_close_vf(struct bnx2x *bp); int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading); -int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); +int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, + bool set); int bnx2x_vfpf_config_rss(struct bnx2x *bp, struct bnx2x_config_rss_params *params); int bnx2x_vfpf_set_mcast(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index ea0e9394f898..c9129b9ba446 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -384,9 +384,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) sizeof(bp->fw_ver)); if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr)) - memcpy(bp->dev->dev_addr, - bp->acquire_resp.resc.current_mac_addr, - ETH_ALEN); + eth_hw_addr_set(bp->dev, + bp->acquire_resp.resc.current_mac_addr); out: bnx2x_vfpf_finalize(bp, &req->first_tlv); @@ -722,7 +721,7 @@ out: } /* request pf to add a mac for the vf */ -int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) +int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set) { struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; @@ -767,7 +766,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) "vfpf SET MAC failed. 
Check bulletin board for new posts\n"); /* copy mac from bulletin to device */ - memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); + eth_hw_addr_set(bp->dev, bulletin.mac); /* check if bulletin board was updated */ if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 62f84cc91e4d..66263aa0d96b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4869,7 +4869,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, #endif static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, - u8 *mac_addr) + const u8 *mac_addr) { struct hwrm_cfa_l2_filter_alloc_output *resp; struct hwrm_cfa_l2_filter_alloc_input *req; @@ -6366,7 +6366,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) if (rx_rings != bp->rx_nr_rings) { netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", rx_rings, bp->rx_nr_rings); - if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) && + if (netif_is_rxfh_configured(bp->dev) && (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != bnxt_get_nr_rss_ctxs(bp, rx_rings) || bnxt_get_max_rss_ring(bp) >= rx_rings)) { @@ -12369,7 +12369,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) if (rc) return rc; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); if (netif_running(dev)) { bnxt_close_nic(bp, false, false); rc = bnxt_open_nic(bp, false, false); @@ -13103,7 +13103,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp) int rc = 0; if (BNXT_PF(bp)) { - memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); + eth_hw_addr_set(bp->dev, bp->pf.mac_addr); } else { #ifdef CONFIG_BNXT_SRIOV struct bnxt_vf_info *vf = &bp->vf; @@ -13111,7 +13111,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp) if (is_valid_ether_addr(vf->mac_addr)) { /* overwrite netdev dev_addr with admin VF MAC */ - memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); + eth_hw_addr_set(bp->dev, vf->mac_addr); /* Older PF driver or firmware may not approve this * correctly. 
*/ @@ -13370,7 +13370,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } bnxt_inv_fw_health_reg(bp); - bnxt_dl_register(bp); + rc = bnxt_dl_register(bp); + if (rc) + goto init_err_dl; rc = register_netdev(dev); if (rc) @@ -13390,6 +13392,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) init_err_cleanup: bnxt_dl_unregister(bp); +init_err_dl: bnxt_shutdown_tc(bp); bnxt_clear_int_mode(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 9576547df4ab..951c0c00cc95 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -134,7 +134,7 @@ void bnxt_dl_fw_reporters_create(struct bnxt *bp) { struct bnxt_fw_health *health = bp->fw_health; - if (!bp->dl || !health) + if (!health) return; if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter) @@ -188,7 +188,7 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) { struct bnxt_fw_health *health = bp->fw_health; - if (!bp->dl || !health) + if (!health) return; if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) && @@ -736,9 +736,6 @@ static const struct devlink_param bnxt_dl_params[] = { NULL), }; -static const struct devlink_param bnxt_dl_port_params[] = { -}; - static int bnxt_dl_params_register(struct bnxt *bp) { int rc; @@ -748,22 +745,10 @@ static int bnxt_dl_params_register(struct bnxt *bp) rc = devlink_params_register(bp->dl, bnxt_dl_params, ARRAY_SIZE(bnxt_dl_params)); - if (rc) { + if (rc) netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n", rc); - return rc; - } - rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); - if (rc) { - netdev_err(bp->dev, "devlink_port_params_register failed\n"); - devlink_params_unregister(bp->dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); - return rc; - } - devlink_params_publish(bp->dl); - - return 0; + return rc; } static void bnxt_dl_params_unregister(struct bnxt *bp) @@ -773,14 +758,13 @@ static void bnxt_dl_params_unregister(struct bnxt *bp) devlink_params_unregister(bp->dl, bnxt_dl_params, ARRAY_SIZE(bnxt_dl_params)); - devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); } int bnxt_dl_register(struct bnxt *bp) { const struct devlink_ops *devlink_ops; struct devlink_port_attrs attrs = {}; + struct bnxt_dl *bp_dl; struct devlink *dl; int rc; @@ -795,21 +779,17 @@ int bnxt_dl_register(struct bnxt *bp) return -ENOMEM; } - bnxt_link_bp_to_dl(bp, dl); + bp->dl = dl; + bp_dl = devlink_priv(dl); + bp_dl->bp = bp; /* Add switchdev eswitch mode setting, if SRIOV supported */ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) && bp->hwrm_spec_code > 0x10803) bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; - rc = devlink_register(dl); - if (rc) { - netdev_warn(bp->dev, "devlink_register failed. 
rc=%d\n", rc); - goto err_dl_free; - } - if (!BNXT_PF(bp)) - return 0; + goto out; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = bp->pf.port_id; @@ -819,21 +799,20 @@ int bnxt_dl_register(struct bnxt *bp) rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); if (rc) { netdev_err(bp->dev, "devlink_port_register failed\n"); - goto err_dl_unreg; + goto err_dl_free; } rc = bnxt_dl_params_register(bp); if (rc) goto err_dl_port_unreg; +out: + devlink_register(dl); return 0; err_dl_port_unreg: devlink_port_unregister(&bp->dl_port); -err_dl_unreg: - devlink_unregister(dl); err_dl_free: - bnxt_link_bp_to_dl(bp, NULL); devlink_free(dl); return rc; } @@ -842,13 +821,10 @@ void bnxt_dl_unregister(struct bnxt *bp) { struct devlink *dl = bp->dl; - if (!dl) - return; - + devlink_unregister(dl); if (BNXT_PF(bp)) { bnxt_dl_params_unregister(bp); devlink_port_unregister(&bp->dl_port); } - devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index d889f240da2b..406dc655a5fc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -20,19 +20,6 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) return ((struct bnxt_dl *)devlink_priv(dl))->bp; } -/* To clear devlink pointer from bp, pass NULL dl */ -static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) -{ - bp->dl = dl; - - /* add a back pointer in dl to bp */ - if (dl) { - struct bnxt_dl *bp_dl = devlink_priv(dl); - - bp_dl->bp = bp; - } -} - #define NVM_OFF_MSIX_VEC_PER_PF_MAX 108 #define NVM_OFF_MSIX_VEC_PER_PF_MIN 114 #define NVM_OFF_IGNORE_ARI 164 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 7260910e75fb..fbb56b1f70fd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -909,7 +909,7 @@ static int bnxt_set_channels(struct net_device *dev, if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && - (dev->priv_flags & IFF_RXFH_CONFIGURED)) { + netif_is_rxfh_configured(dev)) { netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 70d8ca3039dc..1d177fed44a6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1151,7 +1151,7 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) } } -int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) +int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict) { struct hwrm_func_vf_cfg_input *req; int rc = 0; @@ -1217,7 +1217,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) /* overwrite netdev dev_addr with admin VF MAC */ if (is_valid_ether_addr(bp->vf.mac_addr)) - memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); + eth_hw_addr_set(bp->dev, bp->vf.mac_addr); update_vf_mac_exit: hwrm_req_drop(bp, req); if (inform_pf) @@ -1246,7 +1246,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) { } -int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) +int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict) { return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index 
995535e4c11b..9a4bacba477b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -41,5 +41,5 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); void bnxt_update_vf_mac(struct bnxt *); -int bnxt_approve_mac(struct bnxt *, u8 *, bool); +int bnxt_approve_mac(struct bnxt *, const u8 *, bool); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 9401936b74fa..8eb28e088582 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -475,7 +475,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, dev->features |= pf_dev->features; bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx, dev->perm_addr); - ether_addr_copy(dev->dev_addr, dev->perm_addr); + eth_hw_addr_set(dev, dev->perm_addr); /* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */ if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu)) dev->max_mtu = max_mtu; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 23c7595d2a1d..ed53859b6f7d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -935,6 +935,48 @@ static int bcmgenet_set_coalesce(struct net_device *dev, return 0; } +static void bcmgenet_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bcmgenet_priv *priv; + u32 umac_cmd; + + priv = netdev_priv(dev); + + epause->autoneg = priv->autoneg_pause; + + if (netif_carrier_ok(dev)) { + /* report active state when link is up */ + umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD); + epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE); + epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE); + } else { + /* otherwise report stored settings */ + epause->tx_pause = priv->tx_pause; + epause->rx_pause = priv->rx_pause; + } +} + +static int bcmgenet_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev) + return -ENODEV; + + if (!phy_validate_pause(dev->phydev, epause)) + return -EINVAL; + + priv->autoneg_pause = !!epause->autoneg; + priv->tx_pause = !!epause->tx_pause; + priv->rx_pause = !!epause->rx_pause; + + bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); + + return 0; +} + /* standard ethtool support functions. */ enum bcmgenet_stat_type { BCMGENET_STAT_NETDEV = -1, @@ -1587,6 +1629,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_rxnfc = bcmgenet_get_rxnfc, .set_rxnfc = bcmgenet_set_rxnfc, + .get_pauseparam = bcmgenet_get_pauseparam, + .set_pauseparam = bcmgenet_set_pauseparam, }; /* Power down the unimac, based on mode. 
*/ @@ -3222,7 +3266,7 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) } static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, - unsigned char *addr) + const unsigned char *addr) { bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0); bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1); @@ -3364,6 +3408,8 @@ static int bcmgenet_open(struct net_device *dev) goto err_irq1; } + bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); + bcmgenet_netif_start(dev); netif_tx_start_all_queues(dev); @@ -3408,11 +3454,6 @@ static void bcmgenet_netif_stop(struct net_device *dev) */ cancel_work_sync(&priv->bcmgenet_irq_work); - priv->old_link = -1; - priv->old_speed = -1; - priv->old_duplex = -1; - priv->old_pause = -1; - /* tx reclaim */ bcmgenet_tx_reclaim_all(dev); bcmgenet_fini_dma(priv); @@ -3519,7 +3560,7 @@ static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) #define MAX_MDF_FILTER 17 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, - unsigned char *addr, + const unsigned char *addr, int *i) { bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], @@ -3592,7 +3633,7 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) if (netif_running(dev)) return -EBUSY; - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -3950,6 +3991,11 @@ static int bcmgenet_probe(struct platform_device *pdev) spin_lock_init(&priv->lock); + /* Set default pause parameters */ + priv->autoneg_pause = 1; + priv->tx_pause = 1; + priv->rx_pause = 1; + SET_NETDEV_DEV(dev, &pdev->dev); dev_set_drvdata(&pdev->dev, dev); dev->watchdog_timeo = 2 * HZ; @@ -4036,9 +4082,9 @@ static int bcmgenet_probe(struct platform_device *pdev) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); if (pd && !IS_ERR_OR_NULL(pd->mac_address)) - ether_addr_copy(dev->dev_addr, pd->mac_address); + eth_hw_addr_set(dev, pd->mac_address); else - if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN)) + if (device_get_ethdev_address(&pdev->dev, dev)) if (has_acpi_companion(&pdev->dev)) bcmgenet_get_hw_addr(priv, dev->dev_addr); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 0a6d91b0f0aa..1cc2838e52c6 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -594,6 +594,9 @@ struct bcmgenet_priv { /* other misc variables */ struct bcmgenet_hw_params *hw_params; + unsigned autoneg_pause:1; + unsigned tx_pause:1; + unsigned rx_pause:1; /* MDIO bus variables */ wait_queue_head_t wq; @@ -606,10 +609,6 @@ struct bcmgenet_priv { bool clk_eee_enabled; /* PHY device variables */ - int old_link; - int old_speed; - int old_duplex; - int old_pause; phy_interface_t phy_interface; int phy_addr; int ext_phy; @@ -690,6 +689,7 @@ int bcmgenet_mii_init(struct net_device *dev); int bcmgenet_mii_config(struct net_device *dev, bool init); int bcmgenet_mii_probe(struct net_device *dev); void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx); void bcmgenet_phy_power_set(struct net_device *dev, bool enable); void bcmgenet_mii_setup(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 89d16c587bb7..ad56f54eda0a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -25,92 +25,80 @@ #include 
"bcmgenet.h" -/* setup netdev link state when PHY link status change and - * update UMAC and RGMII block when link up - */ -void bcmgenet_mii_setup(struct net_device *dev) +static void bcmgenet_mac_config(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; u32 reg, cmd_bits = 0; - bool status_changed = false; - if (priv->old_link != phydev->link) { - status_changed = true; - priv->old_link = phydev->link; - } + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = CMD_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = CMD_SPEED_100; + else + cmd_bits = CMD_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; - if (phydev->link) { - /* check speed/duplex/pause changes */ - if (priv->old_speed != phydev->speed) { - status_changed = true; - priv->old_speed = phydev->speed; - } + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) { + cmd_bits |= CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + } else { + /* pause capability defaults to Symmetric */ + if (priv->autoneg_pause) { + bool tx_pause = 0, rx_pause = 0; - if (priv->old_duplex != phydev->duplex) { - status_changed = true; - priv->old_duplex = phydev->duplex; - } + if (phydev->autoneg) + phy_get_pause(phydev, &tx_pause, &rx_pause); - if (priv->old_pause != phydev->pause) { - status_changed = true; - priv->old_pause = phydev->pause; + if (!tx_pause) + cmd_bits |= CMD_TX_PAUSE_IGNORE; + if (!rx_pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE; } - /* done if nothing has changed */ - if (!status_changed) - return; - - /* speed */ - if (phydev->speed == SPEED_1000) - cmd_bits = CMD_SPEED_1000; - else if (phydev->speed == SPEED_100) - cmd_bits = CMD_SPEED_100; - else - cmd_bits = CMD_SPEED_10; - cmd_bits <<= CMD_SPEED_SHIFT; - - /* duplex */ - if (phydev->duplex != DUPLEX_FULL) - cmd_bits |= CMD_HD_EN; - - /* pause capability */ - if (!phydev->pause) - cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; - - /* - * Program UMAC and RGMII block based on established - * link speed, duplex, and pause. The speed set in - * umac->cmd tell RGMII block which clock to use for - * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). - * Receive clock is provided by the PHY. - */ - reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); - reg &= ~OOB_DISABLE; - reg |= RGMII_LINK; - bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + /* Manual override */ + if (!priv->rx_pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE; + if (!priv->tx_pause) + cmd_bits |= CMD_TX_PAUSE_IGNORE; + } - reg = bcmgenet_umac_readl(priv, UMAC_CMD); - reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | - CMD_HD_EN | - CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); - reg |= cmd_bits; - if (reg & CMD_SW_RESET) { - reg &= ~CMD_SW_RESET; - bcmgenet_umac_writel(priv, reg, UMAC_CMD); - udelay(2); - reg |= CMD_TX_EN | CMD_RX_EN; - } + /* Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. 
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - } else { - /* done if nothing has changed */ - if (!status_changed) - return; - - /* needed for MoCA fixed PHY to reflect correct link status */ - netif_carrier_off(dev); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; } + bcmgenet_umac_writel(priv, reg, UMAC_CMD); +} +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +void bcmgenet_mii_setup(struct net_device *dev) +{ + struct phy_device *phydev = dev->phydev; + + if (phydev->link) + bcmgenet_mac_config(dev); phy_print_status(phydev); } @@ -130,6 +118,21 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev, return 0; } +void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx) +{ + struct phy_device *phydev = dev->phydev; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising, rx); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising, + rx | tx); + phy_start_aneg(phydev); + + mutex_lock(&phydev->lock); + if (phydev->link) + bcmgenet_mac_config(dev); + mutex_unlock(&phydev->lock); +} + void bcmgenet_phy_power_set(struct net_device *dev, bool enable) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -286,23 +289,53 @@ int bcmgenet_mii_probe(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; struct device_node *dn = kdev->of_node; + phy_interface_t phy_iface = priv->phy_interface; struct phy_device *phydev; - u32 phy_flags = 0; + u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_IDDQ_SUSPEND; int ret; /* Communicate the integrated PHY revision */ if (priv->internal_phy) phy_flags = priv->gphy_rev; - /* Initialize link state variables that bcmgenet_mii_setup() uses */ - priv->old_link = -1; - priv->old_speed = -1; - priv->old_duplex = -1; - priv->old_pause = -1; + /* This is an ugly quirk but we have not been correctly interpreting + * the phy_interface values and we have done that across different + * drivers, so at least we are consistent in our mistakes. + * + * When the Generic PHY driver is in use either the PHY has been + * strapped or programmed correctly by the boot loader so we should + * stick to our incorrect interpretation since we have validated it. + * + * Now when a dedicated PHY driver is in use, we need to reverse the + * meaning of the phy_interface_mode values to something that the PHY + * driver will interpret and act on such that we have two mistakes + * canceling themselves so to speak. We only do this for the two + * modes that GENET driver officially supports on Broadcom STB chips: + * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. Other + * modes are not *officially* supported with the boot loader and the + * scripted environment generating Device Tree blobs for those + * platforms. + * + * Note that internal PHY, MoCA and fixed-link configurations are not + * affected because they use different phy_interface_t values or the + * Generic PHY driver. 
+ */ + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + phy_iface = PHY_INTERFACE_MODE_RGMII_ID; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + phy_iface = PHY_INTERFACE_MODE_RGMII_RXID; + break; + default: + break; + } if (dn) { phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, - phy_flags, priv->phy_interface); + phy_flags, phy_iface); if (!phydev) { pr_err("could not attach to PHY\n"); return -ENODEV; @@ -332,7 +365,7 @@ int bcmgenet_mii_probe(struct net_device *dev) phydev->dev_flags = phy_flags; ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, - priv->phy_interface); + phy_iface); if (ret) { pr_err("could not attach to PHY\n"); return -ENODEV; @@ -350,8 +383,6 @@ int bcmgenet_mii_probe(struct net_device *dev) return ret; } - linkmode_copy(phydev->advertising, phydev->supported); - /* The internal PHY has its link interrupts routed to the * Ethernet MAC ISRs. On GENETv5 there is a hardware issue * that prevents the signaling of link UP interrupts when diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 5e0e0e70d801..e9518b98914b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -3942,7 +3942,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp) } /* tp->lock is held. */ -static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index) +static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr, + int index) { u32 addr_high, addr_low; @@ -9366,7 +9367,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); if (!netif_running(dev)) return 0; @@ -10273,8 +10274,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) if (tg3_asic_rev(tp) == ASIC_REV_5705 && tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { - if (tg3_flag(tp, TSO_CAPABLE) && - tg3_asic_rev(tp) == ASIC_REV_5705) { + if (tg3_flag(tp, TSO_CAPABLE)) { rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && !tg3_flag(tp, IS_5788)) { @@ -11213,12 +11213,8 @@ static void tg3_reset_task(struct work_struct *work) } tg3_netif_start(tp); - tg3_full_unlock(tp); - - if (!err) - tg3_phy_start(tp); - + tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); out: rtnl_unlock(); @@ -16915,19 +16911,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) return err; } -static int tg3_get_device_address(struct tg3 *tp) +static int tg3_get_device_address(struct tg3 *tp, u8 *addr) { - struct net_device *dev = tp->dev; u32 hi, lo, mac_offset; int addr_ok = 0; int err; - if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr)) + if (!eth_platform_get_mac_address(&tp->pdev->dev, addr)) return 0; if (tg3_flag(tp, IS_SSB_CORE)) { - err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]); - if (!err && is_valid_ether_addr(&dev->dev_addr[0])) + err = ssb_gige_get_macaddr(tp->pdev, addr); + if (!err && is_valid_ether_addr(addr)) return 0; } @@ -16951,41 +16946,41 @@ static int tg3_get_device_address(struct tg3 *tp) /* First try to get it from MAC address mailbox. 
*/ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); if ((hi >> 16) == 0x484b) { - dev->dev_addr[0] = (hi >> 8) & 0xff; - dev->dev_addr[1] = (hi >> 0) & 0xff; + addr[0] = (hi >> 8) & 0xff; + addr[1] = (hi >> 0) & 0xff; tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); - dev->dev_addr[2] = (lo >> 24) & 0xff; - dev->dev_addr[3] = (lo >> 16) & 0xff; - dev->dev_addr[4] = (lo >> 8) & 0xff; - dev->dev_addr[5] = (lo >> 0) & 0xff; + addr[2] = (lo >> 24) & 0xff; + addr[3] = (lo >> 16) & 0xff; + addr[4] = (lo >> 8) & 0xff; + addr[5] = (lo >> 0) & 0xff; /* Some old bootcode may report a 0 MAC address in SRAM */ - addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); + addr_ok = is_valid_ether_addr(addr); } if (!addr_ok) { /* Next, try NVRAM. */ if (!tg3_flag(tp, NO_NVRAM) && !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { - memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); - memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); + memcpy(&addr[0], ((char *)&hi) + 2, 2); + memcpy(&addr[2], (char *)&lo, sizeof(lo)); } /* Finally just fetch it out of the MAC control regs. */ else { hi = tr32(MAC_ADDR_0_HIGH); lo = tr32(MAC_ADDR_0_LOW); - dev->dev_addr[5] = lo & 0xff; - dev->dev_addr[4] = (lo >> 8) & 0xff; - dev->dev_addr[3] = (lo >> 16) & 0xff; - dev->dev_addr[2] = (lo >> 24) & 0xff; - dev->dev_addr[1] = hi & 0xff; - dev->dev_addr[0] = (hi >> 8) & 0xff; + addr[5] = lo & 0xff; + addr[4] = (lo >> 8) & 0xff; + addr[3] = (lo >> 16) & 0xff; + addr[2] = (lo >> 24) & 0xff; + addr[1] = hi & 0xff; + addr[0] = (hi >> 8) & 0xff; } } - if (!is_valid_ether_addr(&dev->dev_addr[0])) + if (!is_valid_ether_addr(addr)) return -EINVAL; return 0; } @@ -17561,6 +17556,7 @@ static int tg3_init_one(struct pci_dev *pdev, char str[40]; u64 dma_mask, persist_dma_mask; netdev_features_t features = 0; + u8 addr[ETH_ALEN] __aligned(2); err = pci_enable_device(pdev); if (err) { @@ -17783,12 +17779,13 @@ static int tg3_init_one(struct pci_dev *pdev, tp->rx_pending = 63; } - err = tg3_get_device_address(tp); + err = tg3_get_device_address(tp, addr); if (err) { dev_err(&pdev->dev, "Could not obtain valid ethernet address, aborting\n"); goto err_out_apeunmap; } + eth_hw_addr_set(dev, addr); intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ba47777d9cff..bbdc829c3524 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -875,7 +875,7 @@ bnad_set_netdev_perm_addr(struct bnad *bnad) ether_addr_copy(netdev->perm_addr, bnad->perm_addr); if (is_zero_ether_addr(netdev->dev_addr)) - ether_addr_copy(netdev->dev_addr, bnad->perm_addr); + eth_hw_addr_set(netdev, bnad->perm_addr); } /* Control Path Handlers */ @@ -3249,7 +3249,7 @@ bnad_set_mac_address(struct net_device *netdev, void *addr) err = bnad_mac_addr_set_locked(bnad, sa->sa_data); if (!err) - ether_addr_copy(netdev->dev_addr, sa->sa_data); + eth_hw_addr_set(netdev, sa->sa_data); spin_unlock_irqrestore(&bnad->bna_lock, flags); @@ -3515,7 +3515,6 @@ static void bnad_uninit(struct bnad *bnad) { if (bnad->work_q) { - flush_workqueue(bnad->work_q); destroy_workqueue(bnad->work_q); bnad->work_q = NULL; } diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d8d87213697c..5620b97b3482 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -243,9 +243,11 @@ #define 
MACB_NCR_TPF_SIZE 1 #define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */ #define MACB_TZQ_SIZE 1 -#define MACB_SRTSM_OFFSET 15 -#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */ +#define MACB_SRTSM_OFFSET 15 /* Store Receive Timestamp to Memory */ +#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */ #define MACB_OSSMODE_SIZE 1 +#define MACB_MIIONRGMII_OFFSET 28 /* MII Usage on RGMII Interface */ +#define MACB_MIIONRGMII_SIZE 1 /* Bitfields in NCFGR */ #define MACB_SPD_OFFSET 0 /* Speed */ @@ -713,6 +715,7 @@ #define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 +#define MACB_CAPS_MIIONRGMII 0x00000200 #define MACB_CAPS_CLK_HW_CHG 0x04000000 #define MACB_CAPS_MACB_IS_EMAC 0x08000000 #define MACB_CAPS_FIFO_MODE 0x10000000 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index d13fb1d31821..029dea2873e3 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -313,7 +313,7 @@ static void macb_get_hwaddr(struct macb *bp) addr[5] = (top >> 8) & 0xff; if (is_valid_ether_addr(addr)) { - memcpy(bp->dev->dev_addr, addr, sizeof(addr)); + eth_hw_addr_set(bp->dev, addr); return; } } @@ -547,13 +547,8 @@ static void macb_validate(struct phylink_config *config, if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && (state->interface == PHY_INTERFACE_MODE_NA || state->interface == PHY_INTERFACE_MODE_10GBASER)) { - phylink_set(mask, 10000baseCR_Full); - phylink_set(mask, 10000baseER_Full); + phylink_set_10g_modes(mask); phylink_set(mask, 10000baseKR_Full); - phylink_set(mask, 10000baseLR_Full); - phylink_set(mask, 10000baseLRM_Full); - phylink_set(mask, 10000baseSR_Full); - phylink_set(mask, 10000baseT_Full); if (state->interface != PHY_INTERFACE_MODE_NA) goto out; } @@ -684,6 +679,9 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode, } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) { ctrl |= GEM_BIT(PCSSEL); ncr |= GEM_BIT(ENABLE_HS_MAC); + } else if (bp->caps & MACB_CAPS_MIIONRGMII && + bp->phy_interface == PHY_INTERFACE_MODE_MII) { + ncr |= MACB_BIT(MIIONRGMII); } } @@ -4594,7 +4592,8 @@ static const struct macb_config zynq_config = { }; static const struct macb_config sama7g5_gem_config = { - .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG | + MACB_CAPS_MIIONRGMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -4602,7 +4601,8 @@ static const struct macb_config sama7g5_gem_config = { }; static const struct macb_config sama7g5_emac_config = { - .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN, + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | + MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -4774,7 +4774,7 @@ static int macb_probe(struct platform_device *pdev) if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) bp->rx_intr_mask |= MACB_BIT(RXUBR); - err = of_get_mac_address(np, bp->dev->dev_addr); + err = of_get_ethdev_address(np, bp->dev); if (err == -EPROBE_DEFER) goto err_out_free_netdev; else if (err) diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index c2e1f163bb14..095c5a2144a7 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -38,7 +38,8 @@ static struct 
macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp, return NULL; } -static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct macb *bp = container_of(ptp, struct macb, ptp_clock_info); unsigned long flags; @@ -46,7 +47,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) u32 secl, sech; spin_lock_irqsave(&bp->tsu_clk_lock, flags); + ptp_read_system_prets(sts); first = gem_readl(bp, TN); + ptp_read_system_postts(sts); secl = gem_readl(bp, TSL); sech = gem_readl(bp, TSH); second = gem_readl(bp, TN); @@ -56,7 +59,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) /* if so, use later read & re-read seconds * (assume all done within 1s) */ + ptp_read_system_prets(sts); ts->tv_nsec = gem_readl(bp, TN); + ptp_read_system_postts(sts); secl = gem_readl(bp, TSL); sech = gem_readl(bp, TSH); } else { @@ -161,7 +166,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) } if (delta > TSU_NSEC_MAX_VAL) { - gem_tsu_get_time(&bp->ptp_clock_info, &now); + gem_tsu_get_time(&bp->ptp_clock_info, &now, NULL); now = timespec64_add(now, then); gem_tsu_set_time(&bp->ptp_clock_info, @@ -192,7 +197,7 @@ static const struct ptp_clock_info gem_ptp_caps_template = { .pps = 1, .adjfine = gem_ptp_adjfine, .adjtime = gem_ptp_adjtime, - .gettime64 = gem_tsu_get_time, + .gettimex64 = gem_tsu_get_time, .settime64 = gem_tsu_set_time, .enable = gem_ptp_enable, }; @@ -251,7 +256,7 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1, * The timestamp only contains lower few bits of seconds, * so add value from 1588 timer */ - gem_tsu_get_time(&bp->ptp_clock_info, &tsu); + gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL); /* If the top bit is set in the timestamp, * but not in 1588 timer, it has rolled over, diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index b6a066404f4b..9ad89a53c3e6 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -607,7 +607,7 @@ static inline void xgmac_mac_disable(void __iomem *ioaddr) writel(value, ioaddr + XGMAC_CONTROL); } -static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr, +static void xgmac_set_mac_addr(void __iomem *ioaddr, const unsigned char *addr, int num) { u32 data; @@ -1479,7 +1479,7 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 2a0d64e5797c..73cb03266549 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -411,7 +411,7 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac) if (!ether_addr_equal(netdev->dev_addr, mac)) { macaddr_changed = true; - ether_addr_copy(netdev->dev_addr, mac); + eth_hw_addr_set(netdev, mac); ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac); call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev); } @@ -490,7 +490,6 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev) wq = &lio->rxq_status_wq[q_no]; if (wq->wq) { cancel_delayed_work_sync(&wq->wk.work); - flush_workqueue(wq->wq); 
destroy_workqueue(wq->wq); wq->wq = NULL; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 2907e13b9df6..1daf63e437ce 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1279,6 +1279,14 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) struct lio *lio; dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); + device_lock(&oct->pci_dev->dev); + if (oct->devlink) { + devlink_unregister(oct->devlink); + devlink_free(oct->devlink); + oct->devlink = NULL; + } + device_unlock(&oct->pci_dev->dev); + if (!oct->ifcount) { dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); return 1; @@ -1300,12 +1308,6 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) for (i = 0; i < oct->ifcount; i++) liquidio_destroy_nic_device(oct, i); - if (oct->devlink) { - devlink_unregister(oct->devlink); - devlink_free(oct->devlink); - oct->devlink = NULL; - } - dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); return 0; } @@ -2022,7 +2024,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) return -EIO; } - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); return 0; @@ -3632,7 +3634,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) /* Copy MAC Address to OS network device structure */ - ether_addr_copy(netdev->dev_addr, mac); + eth_hw_addr_set(netdev, mac); /* By default all interfaces on a single Octeon uses the same * tx and rx queues @@ -3749,10 +3751,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) } } + device_lock(&octeon_dev->pci_dev->dev); devlink = devlink_alloc(&liquidio_devlink_ops, sizeof(struct lio_devlink_priv), &octeon_dev->pci_dev->dev); if (!devlink) { + device_unlock(&octeon_dev->pci_dev->dev); dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); goto setup_nic_dev_free; } @@ -3760,15 +3764,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio_devlink = devlink_priv(devlink); lio_devlink->oct = octeon_dev; - if (devlink_register(devlink)) { - devlink_free(devlink); - dev_err(&octeon_dev->pci_dev->dev, - "devlink registration failed\n"); - goto setup_nic_dev_free; - } - octeon_dev->devlink = devlink; octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + devlink_register(devlink); + device_unlock(&octeon_dev->pci_dev->dev); return 0; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index f6396ac64006..c607756b731f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1168,7 +1168,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) return -EPERM; } - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); return 0; @@ -2148,7 +2148,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); /* Copy MAC Address to OS network device structure */ - ether_addr_copy(netdev->dev_addr, mac); + eth_hw_addr_set(netdev, mac); if (liquidio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq, diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 
b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 30463a6d1f8c..4e39d712e121 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1501,7 +1501,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN; - result = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + result = of_get_ethdev_address(pdev->dev.of_node, netdev); if (result) eth_hw_addr_random(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 691e1475d55e..b3d7d1afced7 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -1311,9 +1311,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_enable_device(pdev); if (err) { - dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); - return err; + return dev_err_probe(dev, err, "Failed to enable PCI device\n"); } err = pci_request_regions(pdev, DRV_NAME); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index d1667b759522..35021281882c 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -221,8 +221,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; nic->node = mbx.nic_cfg.node_id; if (!nic->set_mac_pending) - ether_addr_copy(nic->netdev->dev_addr, - mbx.nic_cfg.mac_addr); + eth_hw_addr_set(nic->netdev, mbx.nic_cfg.mac_addr); nic->sqs_mode = mbx.nic_cfg.sqs_mode; nic->loopback_supported = mbx.nic_cfg.loopback_supported; nic->link_up = false; @@ -1612,7 +1611,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); if (nic->pdev->msix_enabled) { if (nicvf_hw_set_mac_addr(nic, netdev)) @@ -2119,10 +2118,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } err = pci_enable_device(pdev); - if (err) { - dev_err(dev, "Failed to enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(dev, err, "Failed to enable PCI device\n"); err = pci_request_regions(pdev, DRV_NAME); if (err) { diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index c36fed9c3d73..574a32f23f96 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1387,10 +1387,10 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev, u8 *dst) { u8 mac[ETH_ALEN]; - u8 *addr; + int ret; - addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN); - if (!addr) { + ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac); + if (ret) { dev_err(dev, "MAC address invalid: %pM\n", mac); return -EINVAL; } @@ -1597,9 +1597,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pcim_enable_device(pdev); if (err) { - dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); - return err; + return dev_err_probe(dev, err, "Failed to enable PCI device\n"); } err = pci_request_regions(pdev, DRV_NAME); diff --git 
a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index d246eee4b6d5..609820e214a3 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -853,7 +853,7 @@ static int t1_set_mac_addr(struct net_device *dev, void *p) if (!mac->ops->macaddress_set) return -EOPNOTSUPP; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); mac->ops->macaddress_set(mac, dev->dev_addr); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h index dfa77491a910..5913eaf442b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb/gmac.h +++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h @@ -117,7 +117,7 @@ struct cmac_ops { const struct cmac_statistics *(*statistics_update)(struct cmac *, int); int (*macaddress_get)(struct cmac *, u8 mac_addr[6]); - int (*macaddress_set)(struct cmac *, u8 mac_addr[6]); + int (*macaddress_set)(struct cmac *, const u8 mac_addr[6]); }; typedef struct _cmac_instance cmac_instance; diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c index c27908e66f5e..0bb37e4680c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c +++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c @@ -496,7 +496,7 @@ static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6]) return 0; } -static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6]) +static int pm3393_macaddress_set(struct cmac *cmac, const u8 ma[6]) { u32 val, lo, mid, hi, enabled = cmac->instance->enabled; diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c index 310add28fcf5..007c591b8bf5 100644 --- a/drivers/net/ethernet/chelsio/cxgb/subr.c +++ b/drivers/net/ethernet/chelsio/cxgb/subr.c @@ -1140,7 +1140,7 @@ int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi) adapter->port[i].dev->name); goto error; } - memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN); + eth_hw_addr_set(adapter->port[i].dev, hw_addr); init_link_config(&adapter->port[i].link_config, bi); } diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c index 873c1c7b4ca0..2ad3efb550c2 100644 --- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c +++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c @@ -379,7 +379,7 @@ static int mac_intr_clear(struct cmac *mac) } /* Expect MAC address to be in network byte order. 
*/ -static int mac_set_address(struct cmac* mac, u8 addr[6]) +static int mac_set_address(struct cmac* mac, const u8 addr[6]) { u32 val; int port = mac->instance->index; @@ -591,7 +591,7 @@ static void port_stats_update(struct cmac *mac) } hw_stats[] = { #define HW_STAT(reg, stat_name) \ - { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL } + { reg, offsetof(struct cmac_statistics, stat_name) / sizeof(u64) } /* Rx stats */ HW_STAT(RxUnicast, RxUnicastFramesOK), diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h index b706f2fbe4f4..a309016f7f8c 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/common.h +++ b/drivers/net/ethernet/chelsio/cxgb3/common.h @@ -710,7 +710,7 @@ int t3_mac_enable(struct cmac *mac, int which); int t3_mac_disable(struct cmac *mac, int which); int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu); int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev); -int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]); +int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]); int t3_mac_set_num_ucast(struct cmac *mac, int n); const struct mac_stats *t3_mac_update_stats(struct cmac *mac); int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 38e47703f9ab..9cf9e33664e4 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2586,7 +2586,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr); if (offload_running(adapter)) write_smt_entry(adapter, pi->port_id); diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 7ff31d1026fb..53feac8da503 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -29,6 +29,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ +#include <linux/etherdevice.h> #include "common.h" #include "regs.h" #include "sge_defs.h" @@ -3758,8 +3759,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, memcpy(hw_addr, adapter->params.vpd.eth_base, 5); hw_addr[5] = adapter->params.vpd.eth_base[5] + i; - memcpy(adapter->port[i]->dev_addr, hw_addr, - ETH_ALEN); + eth_hw_addr_set(adapter->port[i], hw_addr); init_link_config(&p->link_config, p->phy.caps); p->phy.ops->power_down(&p->phy, 1); diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c index 3af19a550372..1bdc6cad1e49 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c +++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c @@ -240,7 +240,7 @@ static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr) } /* Set one of the station's unicast MAC addresses. 
*/ -int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]) +int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]) { if (idx >= mac->nucast) return -EINVAL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ecea3cdd30b3..5657ac8cfca0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1545,7 +1545,7 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx, u8 hw_addr[]) { - ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr); + eth_hw_addr_set(adapter->port[port_idx], hw_addr); ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 0d9cda4ab303..dde1cf51d0ab 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3468,7 +3468,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) if (ret < 0) return ret; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 64144b6171d7..e7b4e3ed056c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -9706,7 +9706,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) if (ret) return ret; - memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(adap->port[i], addr); j++; } return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index f55105a4112f..03cb1410d6fc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -40,6 +40,7 @@ #ifndef __CXGB4VF_ADAPTER_H__ #define __CXGB4VF_ADAPTER_H__ +#include <linux/etherdevice.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/spinlock.h> @@ -507,7 +508,7 @@ static inline const char *port_name(struct adapter *adapter, int pidx) static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx, u8 hw_addr[]) { - memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN); + eth_hw_addr_set(adapter->port[pidx], hw_addr); } /** diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 49b76fd47daa..64479c464b4e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1218,7 +1218,7 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr) if (ret < 0) return ret; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -2902,10 +2902,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Initialize generic PCI device state. */ err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * Reserve PCI resources for the device. 
If we can't get them some diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index d0c4c8b7a15a..bd7920ab166f 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1227,7 +1227,7 @@ static int set_mac_address(struct net_device *dev, void *p) if (netif_running(dev)) return -EBUSY; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n", dev->name, dev->dev_addr); diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 072fac5f5d24..21ba6e893072 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -746,7 +746,7 @@ static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) if (dev == NULL) return NULL; - memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); + eth_hw_addr_set(dev, data->dev_addr); dev->ethtool_ops = &ep93xx_ethtool_ops; dev->netdev_ops = &ep93xx_netdev_ops; diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 6324e80960c3..84251b85fc93 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -541,7 +541,7 @@ static int set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, saddr->sa_data); netdev_info(dev, "Setting MAC address to %pM\n", dev->dev_addr); /* set the Ethernet address */ diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 12ffc14fbecd..6ded4d9fa32a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -139,7 +139,7 @@ static void enic_get_drvinfo(struct net_device *netdev, int err; err = enic_dev_fw_info(enic, &fw_info); - /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info + /* return only when dma_alloc_coherent fails in vnic_dev_fw_info * For other failures, like devcmd failure, we return previously * recorded info. */ @@ -270,7 +270,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev, int err; err = enic_dev_stats_dump(enic, &vstats); - /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump * For other failures, like devcmd failure, we return previously * recorded stats. */ diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index d0a8f7106958..66348cc3aaaf 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -882,7 +882,7 @@ static void enic_get_stats(struct net_device *netdev, int err; err = enic_dev_stats_dump(enic, &stats); - /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump * For other failures, like devcmd failure, we return previously * recorded stats. 
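The set_mac_address() conversions in the cs89x0, mac89x0 and enic hunks all reduce to the same shape: validate the new address where the driver already did, then let eth_hw_addr_set() copy it instead of writing dev->dev_addr directly, since the field is moving towards const. A minimal driver-agnostic sketch (hardware programming omitted):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int example_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Copy the six bytes into dev->dev_addr through the core helper
	 * instead of memcpy()ing into the (now const) field directly. */
	eth_hw_addr_set(dev, addr->sa_data);

	/* A real driver would also program the address into hardware here. */
	return 0;
}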
*/ @@ -985,7 +985,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr) return -EADDRNOTAVAIL; } - memcpy(netdev->dev_addr, addr, netdev->addr_len); + eth_hw_addr_set(netdev, addr); return 0; } diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c index e6a83198c3dd..80f46dbd5117 100644 --- a/drivers/net/ethernet/cisco/enic/enic_pp.c +++ b/drivers/net/ethernet/cisco/enic/enic_pp.c @@ -73,9 +73,9 @@ static int enic_set_port_profile(struct enic *enic, int vf) struct vic_provinfo *vp; const u8 oui[3] = VIC_PROVINFO_CISCO_OUI; const __be16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX); + const u8 *client_mac; char uuid_str[38]; char client_mac_str[18]; - u8 *client_mac; int err; ENIC_PP_BY_INDEX(enic, vf, pp, &err); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 6e745ca4c433..941f175fb911 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -1889,7 +1889,7 @@ static int gmac_set_mac_address(struct net_device *netdev, void *addr) { struct sockaddr *sa = addr; - memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(netdev, sa->sa_data); gmac_write_mac_address(netdev); return 0; @@ -2467,13 +2467,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) DEFAULT_NAPI_WEIGHT); if (is_valid_ether_addr((void *)port->mac_addr)) { - memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN); + eth_hw_addr_set(netdev, (u8 *)port->mac_addr); } else { dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n", port->mac_addr[0], port->mac_addr[1], port->mac_addr[2]); dev_info(dev, "using a random ethernet address\n"); - eth_random_addr(netdev->dev_addr); + eth_hw_addr_random(netdev); } gmac_write_mac_address(netdev); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index e842de6f6635..e13dd53a8b3b 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1670,7 +1670,7 @@ dm9000_probe(struct platform_device *pdev) if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) { mac_src = "platform data"; - memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN); + eth_hw_addr_set(ndev, pdata->dev_addr); } if (!is_valid_ether_addr(ndev->dev_addr)) { diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 117c26fa5909..d51b3d24a0c8 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -666,8 +666,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) struct de_private *de = netdev_priv(dev); u16 hash_table[32]; struct netdev_hw_addr *ha; + const u16 *eaddrs; int i; - u16 *eaddrs; memset(hash_table, 0, sizeof(hash_table)); __set_bit_le(255, hash_table); /* Broadcast entry */ @@ -685,7 +685,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) setup_frm = &de->setup_frame[13*6]; /* Fill the final entry with our physical address. 
*/ - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (const u16 *)dev->dev_addr; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; @@ -695,7 +695,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) { struct de_private *de = netdev_priv(dev); struct netdev_hw_addr *ha; - u16 *eaddrs; + const u16 *eaddrs; /* We have <= 14 addresses so we can use the wonderful 16 address perfect filtering of the Tulip. */ @@ -710,7 +710,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) setup_frm = &de->setup_frame[15*6]; /* Fill the final entry with our physical address. */ - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (const u16 *)dev->dev_addr; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; @@ -1713,6 +1713,7 @@ static const struct ethtool_ops de_ethtool_ops = { static void de21040_get_mac_address(struct de_private *de) { + u8 addr[ETH_ALEN]; unsigned i; dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */ @@ -1724,12 +1725,13 @@ static void de21040_get_mac_address(struct de_private *de) value = dr32(ROMCmd); rmb(); } while (value < 0 && --boguscnt > 0); - de->dev->dev_addr[i] = value; + addr[i] = value; udelay(1); if (boguscnt <= 0) pr_warn("timeout reading 21040 MAC address byte %u\n", i); } + eth_hw_addr_set(de->dev, addr); } static void de21040_get_media_info(struct de_private *de) @@ -1821,8 +1823,7 @@ static void de21041_get_srom_info(struct de_private *de) #endif /* store MAC address */ - for (i = 0; i < 6; i ++) - de->dev->dev_addr[i] = ee_data[i + sa_offset]; + eth_hw_addr_set(de->dev, &ee_data[sa_offset]); /* get offset of controller 0 info leaf. ignore 2nd byte. 
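Where the old code filled dev->dev_addr a byte at a time while reading a ROM, as in de21040_get_mac_address() above, the conversion stages the bytes in a local buffer and commits them with a single eth_hw_addr_set() call. A sketch of the shape; read_rom_byte() is a made-up stand-in for whatever register access the device needs:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical accessor for the device's address ROM. */
u8 read_rom_byte(int i);

static void example_read_mac(struct net_device *dev)
{
	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = read_rom_byte(i);	/* stage in a local buffer */

	eth_hw_addr_set(dev, addr);		/* one const-safe commit */
}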
*/ ofs = ee_data[SROMC0InfoLeaf]; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 36ab4cbf2ad0..13121c4dcfe6 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -4031,6 +4031,7 @@ get_hw_addr(struct net_device *dev) int broken, i, k, tmp, status = 0; u_short j,chksum; struct de4x5_private *lp = netdev_priv(dev); + u8 addr[ETH_ALEN]; broken = de4x5_bad_srom(lp); @@ -4042,28 +4043,30 @@ get_hw_addr(struct net_device *dev) if (lp->chipset == DC21040) { while ((tmp = inl(DE4X5_APROM)) < 0); k += (u_char) tmp; - dev->dev_addr[i++] = (u_char) tmp; + addr[i++] = (u_char) tmp; while ((tmp = inl(DE4X5_APROM)) < 0); k += (u_short) (tmp << 8); - dev->dev_addr[i++] = (u_char) tmp; + addr[i++] = (u_char) tmp; } else if (!broken) { - dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; - dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; + addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; + addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; } else if ((broken == SMC) || (broken == ACCTON)) { - dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++; - dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++; + addr[i] = *((u_char *)&lp->srom + i); i++; + addr[i] = *((u_char *)&lp->srom + i); i++; } } else { k += (u_char) (tmp = inb(EISA_APROM)); - dev->dev_addr[i++] = (u_char) tmp; + addr[i++] = (u_char) tmp; k += (u_short) ((tmp = inb(EISA_APROM)) << 8); - dev->dev_addr[i++] = (u_char) tmp; + addr[i++] = (u_char) tmp; } if (k > 0xffff) k-=0xffff; } if (k == 0xffff) k=0; + eth_hw_addr_set(dev, addr); + if (lp->bus == PCI) { if (lp->chipset == DC21040) { while ((tmp = inl(DE4X5_APROM)) < 0); @@ -4095,8 +4098,9 @@ get_hw_addr(struct net_device *dev) int x = dev->dev_addr[i]; x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4); x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2); - dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1); + addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1); } + eth_hw_addr_set(dev, addr); } #endif /* CONFIG_PPC_PMAC */ @@ -4158,12 +4162,9 @@ test_bad_enet(struct net_device *dev, int status) if ((tmp == 0) || (tmp == 0x5fa)) { if ((lp->chipset == last.chipset) && (lp->bus_num == last.bus) && (lp->bus_num > 0)) { - for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i]; - for (i=ETH_ALEN-1; i>2; --i) { - dev->dev_addr[i] += 1; - if (dev->dev_addr[i] != 0) break; - } - for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i]; + eth_addr_inc(last.addr); + eth_hw_addr_set(dev, last.addr); + if (!an_exception(lp)) { dev->irq = last.irq; } @@ -5391,9 +5392,7 @@ de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data if (netif_queue_stopped(dev)) return -EBUSY; netif_stop_queue(dev); - for (i=0; i<ETH_ALEN; i++) { - dev->dev_addr[i] = tmp.addr[i]; - } + eth_hw_addr_set(dev, tmp.addr); build_setup_frame(dev, PHYS_ADDR_ONLY); /* Set up the descriptor and give ownership to the card */ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index c763b692e164..83f1727d1423 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -476,8 +476,7 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Set Node address */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = db->srom[20 + i]; + eth_hw_addr_set(dev, &db->srom[20]); err = register_netdev (dev); if (err) @@ -1436,9 +1435,9 @@ static void update_cr6(u32 cr6_data, void 
__iomem *ioaddr) static void dm9132_id_table(struct net_device *dev) { + const u16 *addrptr = (const u16 *)dev->dev_addr; struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr + 0xc0; - u16 *addrptr = (u16 *)dev->dev_addr; struct netdev_hw_addr *ha; u16 i, hash_table[4]; @@ -1477,7 +1476,7 @@ static void send_filter_frame(struct net_device *dev) struct dmfe_board_info *db = netdev_priv(dev); struct netdev_hw_addr *ha; struct tx_desc *txptr; - u16 * addrptr; + const u16 * addrptr; u32 * suptr; int i; @@ -1487,7 +1486,7 @@ static void send_filter_frame(struct net_device *dev) suptr = (u32 *) txptr->tx_buf_ptr; /* Node address */ - addrptr = (u16 *) dev->dev_addr; + addrptr = (const u16 *) dev->dev_addr; *suptr++ = addrptr[0]; *suptr++ = addrptr[1]; *suptr++ = addrptr[2]; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index fcedd733bacb..79df5a72877b 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -339,7 +339,7 @@ static void tulip_up(struct net_device *dev) } } else { /* This is set_rx_mode(), but without starting the transmitter. */ - u16 *eaddrs = (u16 *)dev->dev_addr; + const u16 *eaddrs = (const u16 *)dev->dev_addr; u16 *setup_frm = &tp->setup_frame[15*6]; dma_addr_t mapping; @@ -1001,8 +1001,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) struct tulip_private *tp = netdev_priv(dev); u16 hash_table[32]; struct netdev_hw_addr *ha; + const u16 *eaddrs; int i; - u16 *eaddrs; memset(hash_table, 0, sizeof(hash_table)); __set_bit_le(255, hash_table); /* Broadcast entry */ @@ -1019,7 +1019,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) setup_frm = &tp->setup_frame[13*6]; /* Fill the final entry with our physical address. */ - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (const u16 *)dev->dev_addr; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; @@ -1029,7 +1029,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); struct netdev_hw_addr *ha; - u16 *eaddrs; + const u16 *eaddrs; /* We have <= 14 addresses so we can use the wonderful 16 address perfect filtering of the Tulip. */ @@ -1044,7 +1044,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) setup_frm = &tp->setup_frame[15*6]; /* Fill the final entry with our physical address. */ - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (const u16 *)dev->dev_addr; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; @@ -1305,6 +1305,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) int chip_idx = ent->driver_data; const char *chip_name = tulip_tbl[chip_idx].chip_name; unsigned int eeprom_missing = 0; + u8 addr[ETH_ALEN] __aligned(2); unsigned int force_csr0 = 0; board_idx++; @@ -1506,13 +1507,15 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) do { value = ioread32(ioaddr + CSR9); } while (value < 0 && --boguscnt > 0); - put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i); + put_unaligned_le16(value, ((__le16 *)addr) + i); sum += value & 0xffff; } + eth_hw_addr_set(dev, addr); } else if (chip_idx == COMET) { /* No need to read the EEPROM. 
*/ - put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr); - put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4); + put_unaligned_le32(ioread32(ioaddr + 0xA4), addr); + put_unaligned_le16(ioread32(ioaddr + 0xA8), addr + 4); + eth_hw_addr_set(dev, addr); for (i = 0; i < 6; i ++) sum += dev->dev_addr[i]; } else { @@ -1575,20 +1578,23 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #endif for (i = 0; i < 6; i ++) { - dev->dev_addr[i] = ee_data[i + sa_offset]; + addr[i] = ee_data[i + sa_offset]; sum += ee_data[i + sa_offset]; } + eth_hw_addr_set(dev, addr); } /* Lite-On boards have the address byte-swapped. */ if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02) && - dev->dev_addr[1] == 0x00) + dev->dev_addr[1] == 0x00) { for (i = 0; i < 6; i+=2) { - char tmp = dev->dev_addr[i]; - dev->dev_addr[i] = dev->dev_addr[i+1]; - dev->dev_addr[i+1] = tmp; + addr[i] = dev->dev_addr[i+1]; + addr[i+1] = dev->dev_addr[i]; } + eth_hw_addr_set(dev, addr); + } + /* On the Zynx 315 Etherarray and other multiport boards only the first Tulip has an EEPROM. On Sparc systems the mac address is held in the OBP property @@ -1599,17 +1605,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (sum == 0 || sum == 6*0xff) { #if defined(CONFIG_SPARC) struct device_node *dp = pci_device_to_OF_node(pdev); - const unsigned char *addr; + const unsigned char *addr2; int len; #endif eeprom_missing = 1; for (i = 0; i < 5; i++) - dev->dev_addr[i] = last_phys_addr[i]; - dev->dev_addr[i] = last_phys_addr[i] + 1; + addr[i] = last_phys_addr[i]; + addr[i] = last_phys_addr[i] + 1; + eth_hw_addr_set(dev, addr); #if defined(CONFIG_SPARC) - addr = of_get_property(dp, "local-mac-address", &len); - if (addr && len == ETH_ALEN) - memcpy(dev->dev_addr, addr, ETH_ALEN); + addr2 = of_get_property(dp, "local-mac-address", &len); + if (addr2 && len == ETH_ALEN) + eth_hw_addr_set(dev, addr2); #endif #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. 
*/ if (last_irq) diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index d67ef7d02d6b..77d9058431e3 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -272,6 +272,7 @@ static int uli526x_init_one(struct pci_dev *pdev, struct uli526x_board_info *db; /* board information structure */ struct net_device *dev; void __iomem *ioaddr; + u8 addr[ETH_ALEN]; int i, err; ULI526X_DBUG(0, "uli526x_init_one()", 0); @@ -379,7 +380,7 @@ static int uli526x_init_one(struct pci_dev *pdev, uw32(DCR13, 0x1b0); //Select ID Table access port //Read MAC address from CR14 for (i = 0; i < 6; i++) - dev->dev_addr[i] = ur32(DCR14); + addr[i] = ur32(DCR14); //Read end uw32(DCR13, 0); //Clear CR13 uw32(DCR0, 0); //Clear CR0 @@ -388,8 +389,10 @@ static int uli526x_init_one(struct pci_dev *pdev, else /*Exist SROM*/ { for (i = 0; i < 6; i++) - dev->dev_addr[i] = db->srom[20 + i]; + addr[i] = db->srom[20 + i]; } + eth_hw_addr_set(dev, addr); + err = register_netdev (dev); if (err) goto err_out_unmap; @@ -1343,7 +1346,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) void __iomem *ioaddr = db->ioaddr; struct netdev_hw_addr *ha; struct tx_desc *txptr; - u16 * addrptr; + const u16 * addrptr; u32 * suptr; int i; @@ -1353,7 +1356,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) suptr = (u32 *) txptr->tx_buf_ptr; /* Node address */ - addrptr = (u16 *) dev->dev_addr; + addrptr = (const u16 *) dev->dev_addr; *suptr++ = addrptr[0] << FLT_SHIFT; *suptr++ = addrptr[1] << FLT_SHIFT; *suptr++ = addrptr[2] << FLT_SHIFT; diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 85b99099c6b9..c4217ca5d709 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -355,6 +355,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) int chip_idx = ent->driver_data; int irq; int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; + __le16 addr[ETH_ALEN / 2]; void __iomem *ioaddr; i = pcim_enable_device(pdev); @@ -382,7 +383,8 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_netdev; for (i = 0; i < 3; i++) - ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i)); + addr[i] = cpu_to_le16(eeprom_read(ioaddr, i)); + eth_hw_addr_set(dev, (u8 *)addr); /* Reset the chip to erase previous misconfiguration. No hold time required! 
*/ diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c index a8de79355578..8759f9f76b62 100644 --- a/drivers/net/ethernet/dec/tulip/xircom_cb.c +++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c @@ -1015,12 +1015,14 @@ static void read_mac_address(struct xircom_private *card) xw32(CSR10, i + 3); data_count = xr32(CSR9); if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { + u8 addr[ETH_ALEN]; int j; for (j = 0; j < 6; j++) { xw32(CSR10, i + j + 4); - card->dev->dev_addr[j] = xr32(CSR9) & 0xff; + addr[j] = xr32(CSR9) & 0xff; } + eth_hw_addr_set(card->dev, addr); break; } else if (link == 0) { break; diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 202ecb132053..a301f7e6a440 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -349,8 +349,7 @@ parse_eeprom (struct net_device *dev) } /* Set MAC address */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = psrom->mac_addr[i]; + eth_hw_addr_set(dev, psrom->mac_addr); if (np->chip_id == CHIP_IP1000A) { np->led_mode = psrom->led_mode; @@ -567,7 +566,7 @@ static void rio_hw_init(struct net_device *dev) */ for (i = 0; i < 3; i++) dw16(StationAddr0 + 2 * i, - cpu_to_le16(((u16 *)dev->dev_addr)[i])); + cpu_to_le16(((const u16 *)dev->dev_addr)[i])); set_multicast (dev); if (np->coalesce) { diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index c36d186dffed..c710dc17be90 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -508,6 +508,7 @@ static int sundance_probe1(struct pci_dev *pdev, int bar = 1; #endif int phy, phy_end, phy_idx = 0; + __le16 addr[ETH_ALEN / 2]; if (pci_enable_device(pdev)) return -EIO; @@ -528,8 +529,9 @@ static int sundance_probe1(struct pci_dev *pdev, goto err_out_res; for (i = 0; i < 3; i++) - ((__le16 *)dev->dev_addr)[i] = + addr[i] = cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); + eth_hw_addr_set(dev, (u8 *)addr); np = netdev_priv(dev); np->ndev = dev; @@ -1611,7 +1613,7 @@ static int sundance_set_mac_addr(struct net_device *dev, void *data) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); __set_mac_addr(dev); return 0; diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 6c51cf991dad..92462ed87bc4 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -60,11 +60,11 @@ static void __dnet_set_hwaddr(struct dnet *bp) { u16 tmp; - tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr); + tmp = be16_to_cpup((const __be16 *)bp->dev->dev_addr); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); - tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2)); + tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 2)); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); - tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4)); + tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 4)); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); } @@ -93,7 +93,7 @@ static void dnet_get_hwaddr(struct dnet *bp) *((__be16 *)(addr + 4)) = cpu_to_be16(tmp); if (is_valid_ether_addr(addr)) - memcpy(bp->dev->dev_addr, addr, sizeof(addr)); + eth_hw_addr_set(bp->dev, addr); } static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 
649c5c429bd7..528eb0f223b1 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1080,7 +1080,7 @@ err: } /* Uses synchronous MCCQ */ -int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, +int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, u32 if_id, u32 *pmac_id, u32 domain) { struct be_mcc_wrb *wrb; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index c30d6d6f0f3a..db1f3b908582 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -2385,7 +2385,7 @@ int be_pci_fnum_get(struct be_adapter *adapter); int be_fw_wait_ready(struct be_adapter *adapter); int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, bool permanent, u32 if_handle, u32 pmac_id); -int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id, +int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, u32 if_id, u32 *pmac_id, u32 domain); int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 domain); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 361c1c87c183..d51f24c9e1b8 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -272,7 +272,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) iowrite32(val, adapter->db + DB_CQ_OFFSET); } -static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac) +static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac) { int i; @@ -369,7 +369,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) /* Remember currently programmed MAC */ ether_addr_copy(adapter->dev_mac, addr->sa_data); done: - ether_addr_copy(netdev->dev_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data); dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); return 0; err: @@ -4599,7 +4599,7 @@ static int be_mac_setup(struct be_adapter *adapter) if (status) return status; - memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); + eth_hw_addr_set(adapter->netdev, mac); memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); /* Initial MAC for BE3 VFs is already programmed by PF */ @@ -4621,7 +4621,6 @@ static void be_destroy_err_recovery_workq(void) if (!be_err_recovery_workq) return; - flush_workqueue(be_err_recovery_workq); destroy_workqueue(be_err_recovery_workq); be_err_recovery_workq = NULL; } diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index ed1ed48e7483..ed2ef167cdb2 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -707,20 +707,16 @@ static int ethoc_mdio_probe(struct net_device *dev) else phy = phy_find_first(priv->mdio); - if (!phy) { - dev_err(&dev->dev, "no PHY found\n"); - return -ENXIO; - } + if (!phy) + return dev_err_probe(&dev->dev, -ENXIO, "no PHY found\n"); priv->old_duplex = -1; priv->old_link = -1; err = phy_connect_direct(dev, phy, ethoc_mdio_poll, PHY_INTERFACE_MODE_GMII); - if (err) { - dev_err(&dev->dev, "could not attach to PHY\n"); - return err; - } + if (err) + return dev_err_probe(&dev->dev, err, "could not attach to PHY\n"); phy_set_max_speed(phy, SPEED_100); @@ -806,8 +802,8 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void ethoc_do_set_mac_address(struct net_device *dev) { + const unsigned char *mac = dev->dev_addr; struct ethoc *priv = netdev_priv(dev); - unsigned 
char *mac = dev->dev_addr; ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | (mac[5] << 0)); @@ -820,7 +816,7 @@ static int ethoc_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); ethoc_do_set_mac_address(dev); return 0; } @@ -1148,10 +1144,10 @@ static int ethoc_probe(struct platform_device *pdev) /* Allow the platform setup code to pass in a MAC address. */ if (pdata) { - ether_addr_copy(netdev->dev_addr, pdata->hwaddr); + eth_hw_addr_set(netdev, pdata->hwaddr); priv->phy_id = pdata->phy_id; } else { - of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + of_get_ethdev_address(pdev->dev.of_node, netdev); priv->phy_id = -1; } diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig index 38aa824efb25..9241b9b1c7a3 100644 --- a/drivers/net/ethernet/ezchip/Kconfig +++ b/drivers/net/ethernet/ezchip/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_EZCHIP config EZCHIP_NPS_MANAGEMENT_ENET tristate "EZchip NPS management enet support" - depends on OF_IRQ && OF_NET + depends on OF_IRQ depends on HAS_IOMEM help Simple LAN device for debug or management purposes. diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index f9a288a6ec8c..323340826dab 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -421,7 +421,7 @@ static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p) res = eth_mac_addr(ndev, p); if (!res) { - ether_addr_copy(ndev->dev_addr, addr->sa_data); + eth_hw_addr_set(ndev, addr->sa_data); nps_enet_set_hw_mac_address(ndev); } @@ -601,7 +601,7 @@ static s32 nps_enet_probe(struct platform_device *pdev) dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base); /* set kernel MAC address to dev */ - err = of_get_mac_address(dev->of_node, ndev->dev_addr); + err = of_get_ethdev_address(dev->of_node, ndev); if (err) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index ff76e401a014..97c5d70de76e 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -182,13 +182,10 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv) u8 mac[ETH_ALEN]; unsigned int m; unsigned int l; - void *addr; - addr = device_get_mac_address(priv->dev, mac, ETH_ALEN); - if (addr) { - ether_addr_copy(priv->netdev->dev_addr, mac); + if (!device_get_ethdev_address(priv->dev, priv->netdev)) { dev_info(priv->dev, "Read MAC address %pM from device tree\n", - mac); + priv->netdev->dev_addr); return; } @@ -203,7 +200,7 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv) mac[5] = l & 0xff; if (is_valid_ether_addr(mac)) { - ether_addr_copy(priv->netdev->dev_addr, mac); + eth_hw_addr_set(priv->netdev, mac); dev_info(priv->dev, "Read MAC address %pM from chip\n", mac); } else { eth_hw_addr_random(priv->netdev); diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 25c91b3c5fd3..63c935e7b625 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -827,7 +827,7 @@ static int netdev_open(struct net_device *dev) return -EAGAIN; for (i = 0; i < 3; i++) - iowrite16(((unsigned short*)dev->dev_addr)[i], + iowrite16(((const unsigned short *)dev->dev_addr)[i], ioaddr + PAR0 + i*2); init_ring(dev); diff --git 
a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 685d2d8a3b36..6b2927d863e2 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -268,11 +268,11 @@ static int dpaa_netdev_init(struct net_device *net_dev, if (is_valid_ether_addr(mac_addr)) { memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + eth_hw_addr_set(net_dev, mac_addr); } else { eth_hw_addr_random(net_dev); err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac, - (enet_addr_t *)net_dev->dev_addr); + (const enet_addr_t *)net_dev->dev_addr); if (err) { dev_err(dev, "Failed to set random MAC address\n"); return -EINVAL; @@ -452,7 +452,7 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr) mac_dev = priv->mac_dev; err = mac_dev->change_addr(mac_dev->fman_mac, - (enet_addr_t *)net_dev->dev_addr); + (const enet_addr_t *)net_dev->dev_addr); if (err < 0) { netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n", err); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c index 605a39f892b9..7fefe1574b6a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c @@ -189,12 +189,11 @@ static const struct devlink_ops dpaa2_eth_devlink_ops = { .trap_group_action_set = dpaa2_eth_dl_trap_group_action_set, }; -int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) +int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; struct device *dev = net_dev->dev.parent; struct dpaa2_eth_devlink_priv *dl_priv; - int err; priv->devlink = devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev); @@ -204,25 +203,23 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) } dl_priv = devlink_priv(priv->devlink); dl_priv->dpaa2_priv = priv; - - err = devlink_register(priv->devlink); - if (err) { - dev_err(dev, "devlink_register() = %d\n", err); - goto devlink_free; - } - return 0; +} -devlink_free: +void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv) +{ devlink_free(priv->devlink); +} - return err; + +void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) +{ + devlink_register(priv->devlink); } void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv) { devlink_unregister(priv->devlink); - devlink_free(priv->devlink); } int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 7065c71ed7b8..34f189271ef2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4013,7 +4013,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv) return err; } } - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + eth_hw_addr_set(net_dev, mac_addr); } else if (is_zero_ether_addr(dpni_mac_addr)) { /* No MAC address configured, fill in net_dev->dev_addr * with a random one @@ -4038,7 +4038,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv) /* NET_ADDR_PERM is default, all we have to do is * fill in the device addr. 
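The dpaa2-eth devlink rework above splits the old register helper into an alloc step done early in probe and a register step done only after the netdev and ports are fully set up, with remove unwinding in the opposite order. A rough sketch of that ordering, assuming the void-returning devlink_register() used in the hunk and illustrative example_* names:

#include <net/devlink.h>

struct example_dl_priv {
	void *owner;				/* back-pointer, driver specific */
};

/* probe: allocate early, publish to user space only when fully set up */
static struct devlink *example_devlink_setup(const struct devlink_ops *ops,
					     struct device *dev)
{
	struct devlink *dl;

	dl = devlink_alloc(ops, sizeof(struct example_dl_priv), dev);
	if (!dl)
		return NULL;

	/* ... register netdev, devlink ports, traps ... */

	devlink_register(dl);			/* last step of probe */
	return dl;
}

/* remove: mirror image of probe */
static void example_devlink_teardown(struct devlink *dl)
{
	devlink_unregister(dl);			/* first: hide from user space */
	/* ... unregister ports, traps, netdev ... */
	devlink_free(dl);			/* last: release the memory */
}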
*/ - memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); + eth_hw_addr_set(net_dev, dpni_mac_addr); } return 0; @@ -4431,7 +4431,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) if (err) goto err_connect_mac; - err = dpaa2_eth_dl_register(priv); + err = dpaa2_eth_dl_alloc(priv); if (err) goto err_dl_register; @@ -4453,6 +4453,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) dpaa2_dbg_add(priv); #endif + dpaa2_eth_dl_register(priv); dev_info(dev, "Probed interface %s\n", net_dev->name); return 0; @@ -4461,7 +4462,7 @@ err_netdev_reg: err_dl_port_add: dpaa2_eth_dl_traps_unregister(priv); err_dl_trap_register: - dpaa2_eth_dl_unregister(priv); + dpaa2_eth_dl_free(priv); err_dl_register: dpaa2_eth_disconnect_mac(priv); err_connect_mac: @@ -4508,6 +4509,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) net_dev = dev_get_drvdata(dev); priv = netdev_priv(net_dev); + dpaa2_eth_dl_unregister(priv); + #ifdef CONFIG_DEBUG_FS dpaa2_dbg_remove(priv); #endif @@ -4519,7 +4522,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) dpaa2_eth_dl_port_del(priv); dpaa2_eth_dl_traps_unregister(priv); - dpaa2_eth_dl_unregister(priv); + dpaa2_eth_dl_free(priv); if (priv->do_link_poll) kthread_stop(priv->poll_thread); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index cdb623d5f2c1..628d2d45f045 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -725,7 +725,10 @@ void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops; -int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv); +int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv); +void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv); + +void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv); void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv); int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index ae6d382d8735..ef8f0a055024 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -139,7 +139,7 @@ static void dpaa2_mac_validate(struct phylink_config *config, case PHY_INTERFACE_MODE_NA: case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_USXGMII: - phylink_set(mask, 10000baseT_Full); + phylink_set_10g_modes(mask); if (state->interface == PHY_INTERFACE_MODE_10GBASER) break; phylink_set(mask, 5000baseT_Full); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 175f15c46842..d039457928b0 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -980,7 +980,7 @@ static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv) /* First check if firmware has any address configured by bootloader */ if (!is_zero_ether_addr(mac_addr)) { - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + eth_hw_addr_set(net_dev, mac_addr); } else { /* No MAC address configured, fill in net_dev->dev_addr * with a random one diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 042327b9981f..8e31fe15bf3c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -7,7 +7,9 @@ #include 
<linux/udp.h> #include <linux/vmalloc.h> #include <linux/ptp_classify.h> +#include <net/ip6_checksum.h> #include <net/pkt_sched.h> +#include <net/tso.h> static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv) { @@ -314,12 +316,261 @@ dma_err: return 0; } +static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, + struct enetc_tx_swbd *tx_swbd, + union enetc_tx_bd *txbd, int *i, int hdr_len, + int data_len) +{ + union enetc_tx_bd txbd_tmp; + u8 flags = 0, e_flags = 0; + dma_addr_t addr; + + enetc_clear_tx_bd(&txbd_tmp); + addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; + + if (skb_vlan_tag_present(skb)) + flags |= ENETC_TXBD_FLAGS_EX; + + txbd_tmp.addr = cpu_to_le64(addr); + txbd_tmp.buf_len = cpu_to_le16(hdr_len); + + /* first BD needs frm_len and offload flags set */ + txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len); + txbd_tmp.flags = flags; + + /* For the TSO header we do not set the dma address since we do not + * want it unmapped when we do cleanup. We still set len so that we + * count the bytes sent. + */ + tx_swbd->len = hdr_len; + tx_swbd->do_twostep_tstamp = false; + tx_swbd->check_wb = false; + + /* Actually write the header in the BD */ + *txbd = txbd_tmp; + + /* Add extension BD for VLAN */ + if (flags & ENETC_TXBD_FLAGS_EX) { + /* Get the next BD */ + enetc_bdr_idx_inc(tx_ring, i); + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + + /* Setup the VLAN fields */ + enetc_clear_tx_bd(&txbd_tmp); + txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); + txbd_tmp.ext.tpid = 0; /* < C-TAG */ + e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; + + /* Write the BD */ + txbd_tmp.ext.e_flags = e_flags; + *txbd = txbd_tmp; + } +} + +static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, + struct enetc_tx_swbd *tx_swbd, + union enetc_tx_bd *txbd, char *data, + int size, bool last_bd) +{ + union enetc_tx_bd txbd_tmp; + dma_addr_t addr; + u8 flags = 0; + + enetc_clear_tx_bd(&txbd_tmp); + + addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, addr))) { + netdev_err(tx_ring->ndev, "DMA map error\n"); + return -ENOMEM; + } + + if (last_bd) { + flags |= ENETC_TXBD_FLAGS_F; + tx_swbd->is_eof = 1; + } + + txbd_tmp.addr = cpu_to_le64(addr); + txbd_tmp.buf_len = cpu_to_le16(size); + txbd_tmp.flags = flags; + + tx_swbd->dma = addr; + tx_swbd->len = size; + tx_swbd->dir = DMA_TO_DEVICE; + + *txbd = txbd_tmp; + + return 0; +} + +static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb, + char *hdr, int hdr_len, int *l4_hdr_len) +{ + char *l4_hdr = hdr + skb_transport_offset(skb); + int mac_hdr_len = skb_network_offset(skb); + + if (tso->tlen != sizeof(struct udphdr)) { + struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); + + tcph->check = 0; + } else { + struct udphdr *udph = (struct udphdr *)(l4_hdr); + + udph->check = 0; + } + + /* Compute the IP checksum. This is necessary since tso_build_hdr() + * already incremented the IP ID field. + */ + if (!tso->ipv6) { + struct iphdr *iph = (void *)(hdr + mac_hdr_len); + + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + } + + /* Compute the checksum over the L4 header. 
*/ + *l4_hdr_len = hdr_len - skb_transport_offset(skb); + return csum_partial(l4_hdr, *l4_hdr_len, 0); +} + +static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso, + struct sk_buff *skb, char *hdr, int len, + __wsum sum) +{ + char *l4_hdr = hdr + skb_transport_offset(skb); + __sum16 csum_final; + + /* Complete the L4 checksum by appending the pseudo-header to the + * already computed checksum. + */ + if (!tso->ipv6) + csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + len, ip_hdr(skb)->protocol, sum); + else + csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + len, ipv6_hdr(skb)->nexthdr, sum); + + if (tso->tlen != sizeof(struct udphdr)) { + struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); + + tcph->check = csum_final; + } else { + struct udphdr *udph = (struct udphdr *)(l4_hdr); + + udph->check = csum_final; + } +} + +static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) +{ + int hdr_len, total_len, data_len; + struct enetc_tx_swbd *tx_swbd; + union enetc_tx_bd *txbd; + struct tso_t tso; + __wsum csum, csum2; + int count = 0, pos; + int err, i, bd_data_num; + + /* Initialize the TSO handler, and prepare the first payload */ + hdr_len = tso_start(skb, &tso); + total_len = skb->len - hdr_len; + i = tx_ring->next_to_use; + + while (total_len > 0) { + char *hdr; + + /* Get the BD */ + txbd = ENETC_TXBD(*tx_ring, i); + tx_swbd = &tx_ring->tx_swbd[i]; + prefetchw(txbd); + + /* Determine the length of this packet */ + data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_len; + + /* prepare packet headers: MAC + IP + TCP */ + hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0); + + /* compute the csum over the L4 header */ + csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos); + enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len); + bd_data_num = 0; + count++; + + while (data_len > 0) { + int size; + + size = min_t(int, tso.size, data_len); + + /* Advance the index in the BDR */ + enetc_bdr_idx_inc(tx_ring, &i); + txbd = ENETC_TXBD(*tx_ring, i); + tx_swbd = &tx_ring->tx_swbd[i]; + prefetchw(txbd); + + /* Compute the checksum over this segment of data and + * add it to the csum already computed (over the L4 + * header and possible other data segments). 
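enetc_tso_complete_csum() and the loop above build the L4 checksum in software while segmenting: a partial sum over the L4 header, csum_block_add() for each payload chunk, then a final fold against the pseudo-header. A condensed sketch of that flow for TCP over IPv4, with a single payload chunk and no descriptor handling:

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

/* hdr points at the rebuilt MAC+IP+TCP header for one segment (as produced
 * by tso_build_hdr()), l4_len is the TCP header length, data/data_len is
 * the segment payload. */
static void example_tso_csum(struct sk_buff *skb, char *hdr, int l4_len,
			     const void *data, int data_len)
{
	struct tcphdr *tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
	__wsum csum;

	tcph->check = 0;
	csum = csum_partial(tcph, l4_len, 0);		/* L4 header */
	/* payload, at an offset equal to the header already summed */
	csum = csum_block_add(csum, csum_partial(data, data_len, 0), l4_len);
	/* fold in the IPv4 pseudo-header and write the final checksum */
	tcph->check = csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
					l4_len + data_len,
					ip_hdr(skb)->protocol, csum);
}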
+ */ + csum2 = csum_partial(tso.data, size, 0); + csum = csum_block_add(csum, csum2, pos); + pos += size; + + err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, + tso.data, size, + size == data_len); + if (err) + goto err_map_data; + + data_len -= size; + count++; + bd_data_num++; + tso_build_data(skb, &tso, size); + + if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len)) + goto err_chained_bd; + } + + enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum); + + if (total_len == 0) + tx_swbd->skb = skb; + + /* Go to the next BD */ + enetc_bdr_idx_inc(tx_ring, &i); + } + + tx_ring->next_to_use = i; + enetc_update_tx_ring_tail(tx_ring); + + return count; + +err_map_data: + dev_err(tx_ring->dev, "DMA map error"); + +err_chained_bd: + do { + tx_swbd = &tx_ring->tx_swbd[i]; + enetc_free_tx_frame(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } while (count--); + + return 0; +} + static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct enetc_bdr *tx_ring; - int count; + int count, err; /* Queue one-step Sync packet if already locked */ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { @@ -332,19 +583,35 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, tx_ring = priv->tx_ring[skb->queue_mapping]; - if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) - if (unlikely(skb_linearize(skb))) - goto drop_packet_err; + if (skb_is_gso(skb)) { + if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } - count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ - if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { - netif_stop_subqueue(ndev, tx_ring->index); - return NETDEV_TX_BUSY; - } + enetc_lock_mdio(); + count = enetc_map_tx_tso_buffs(tx_ring, skb); + enetc_unlock_mdio(); + } else { + if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) + if (unlikely(skb_linearize(skb))) + goto drop_packet_err; + + count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } - enetc_lock_mdio(); - count = enetc_map_tx_buffs(tx_ring, skb); - enetc_unlock_mdio(); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + err = skb_checksum_help(skb); + if (err) + goto drop_packet_err; + } + enetc_lock_mdio(); + count = enetc_map_tx_buffs(tx_ring, skb); + enetc_unlock_mdio(); + } if (unlikely(!count)) goto drop_packet_err; @@ -1493,15 +1760,32 @@ static int enetc_alloc_txbdr(struct enetc_bdr *txr) return -ENOMEM; err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd)); - if (err) { - vfree(txr->tx_swbd); - return err; + if (err) + goto err_alloc_bdr; + + txr->tso_headers = dma_alloc_coherent(txr->dev, + txr->bd_count * TSO_HEADER_SIZE, + &txr->tso_headers_dma, + GFP_KERNEL); + if (!txr->tso_headers) { + err = -ENOMEM; + goto err_alloc_tso; } txr->next_to_clean = 0; txr->next_to_use = 0; return 0; + +err_alloc_tso: + dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd), + txr->bd_base, txr->bd_dma_base); + txr->bd_base = NULL; +err_alloc_bdr: + vfree(txr->tx_swbd); + txr->tx_swbd = NULL; + + return err; } static void enetc_free_txbdr(struct enetc_bdr *txr) @@ -1513,6 +1797,10 @@ static void enetc_free_txbdr(struct enetc_bdr *txr) size = txr->bd_count * sizeof(union enetc_tx_bd); + dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE, + 
txr->tso_headers, txr->tso_headers_dma); + txr->tso_headers = NULL; + dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base); txr->bd_base = NULL; @@ -2607,10 +2895,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) pcie_flr(pdev); err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "device enable failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "device enable failed\n"); /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 08b283347d9c..fb39e406b7fc 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -112,6 +112,10 @@ struct enetc_bdr { dma_addr_t bd_dma_base; u8 tsd_enable; /* Time specific departure */ bool ext_en; /* enable h/w descriptor extensions */ + + /* DMA buffer for TSO headers */ + char *tso_headers; + dma_addr_t tso_headers_dma; } ____cacheline_aligned_in_smp; static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 4c977dfc44f0..8281dd664f4e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -40,7 +40,7 @@ static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr) if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, saddr->sa_data); enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data); return 0; @@ -759,10 +759,14 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK; + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; if (si->num_rss) ndev->hw_features |= NETIF_F_RXHASH; @@ -803,10 +807,8 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np) snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); err = of_mdiobus_register(bus, np); - if (err) { - dev_err(dev, "cannot register MDIO bus\n"); - return err; - } + if (err) + return dev_err_probe(dev, err, "cannot register MDIO bus\n"); pf->mdio = bus; @@ -1215,10 +1217,8 @@ static int enetc_pf_probe(struct pci_dev *pdev, ERR_PTR(err)); err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); - if (err) { - dev_err(&pdev->dev, "PCI probing failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); si = pci_get_drvdata(pdev); if (!si->hw.port || !si->hw.global) { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c index bc594892507a..36b4f51dd297 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -39,10 +39,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev, } err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, 
"device enable failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "device enable failed\n"); /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 1a9d1e8b772c..df312c9f8243 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -122,10 +122,14 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; if (si->num_rss) ndev->hw_features |= NETIF_F_RXHASH; @@ -143,10 +147,8 @@ static int enetc_vf_probe(struct pci_dev *pdev, int err; err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0); - if (err) { - dev_err(&pdev->dev, "PCI probing failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); si = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ec87b370bba1..47a6fc702ac7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1768,7 +1768,7 @@ static int fec_get_mac(struct net_device *ndev) return 0; } - memcpy(ndev->dev_addr, iap, ETH_ALEN); + eth_hw_addr_set(ndev, iap); /* Adjust MAC if using macaddr */ if (iap == macaddr) @@ -3326,7 +3326,7 @@ fec_set_mac_address(struct net_device *ndev, void *p) if (addr) { if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); } /* Add netif status check here to avoid system hang in below case: diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 73ff359a15f1..bbbde9f701c2 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -112,7 +112,7 @@ static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sock = addr; - memcpy(dev->dev_addr, sock->sa_data, dev->addr_len); + eth_hw_addr_set(dev, sock->sa_data); mpc52xx_fec_set_paddr(dev, sock->sa_data); return 0; @@ -890,7 +890,7 @@ static int mpc52xx_fec_probe(struct platform_device *op) * * First try to read MAC address from DT */ - rv = of_get_mac_address(np, ndev->dev_addr); + rv = of_get_ethdev_address(np, ndev); if (rv) { struct mpc52xx_fec __iomem *fec = priv->fec; diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index bce3c9398887..1950a8936bc0 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -366,7 +366,7 @@ static void set_dflts(struct dtsec_cfg *cfg) cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME; } -static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr) +static void set_mac_address(struct dtsec_regs __iomem *regs, const u8 *adr) { u32 tmp; @@ -516,7 +516,7 @@ static int init(struct dtsec_regs __iomem *regs, struct 
dtsec_cfg *cfg, if (addr) { MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr); - set_mac_address(regs, (u8 *)eth_addr); + set_mac_address(regs, (const u8 *)eth_addr); } /* HASH */ @@ -1022,7 +1022,7 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) return 0; } -int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr) +int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr) { struct dtsec_regs __iomem *regs = dtsec->regs; enum comm_mode mode = COMM_MODE_NONE; @@ -1041,7 +1041,7 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr) * Station address have to be swapped (big endian to little endian */ dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr); - set_mac_address(dtsec->regs, (u8 *)(*enet_addr)); + set_mac_address(dtsec->regs, (const u8 *)(*enet_addr)); graceful_start(dtsec, mode); diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h index 5149d96ec2c1..68512c3bd6e5 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h @@ -37,7 +37,7 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params); int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val); -int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr); +int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr); int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed); int dtsec_restart_autoneg(struct fman_mac *dtsec); diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 62f42921933d..2216b7f51d26 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -354,7 +354,7 @@ struct fman_mac { bool allmulti_enabled; }; -static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr, +static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr, u8 paddr_num) { u32 tmp0, tmp1; @@ -897,12 +897,12 @@ int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en) return 0; } -int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr) +int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr) { if (!is_init_done(memac->memac_drv_param)) return -EINVAL; - add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0); + add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0); return 0; } @@ -1058,7 +1058,7 @@ int memac_init(struct fman_mac *memac) /* MAC Address */ if (memac->addr != 0) { MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr); - add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0); + add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0); } fixed_link = memac_drv_param->fixed_link; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h index b2c671ec0ce7..3820f7a22983 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.h +++ b/drivers/net/ethernet/freescale/fman/fman_memac.h @@ -40,7 +40,7 @@ struct fman_mac *memac_config(struct fman_mac_params *params); int memac_set_promiscuous(struct fman_mac *memac, bool new_val); -int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr); +int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr); int memac_adjust_link(struct fman_mac *memac, u16 speed); int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val); int 
memac_cfg_reset_on_init(struct fman_mac *memac, bool enable); diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 41946b16f6c7..311c1906e044 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -221,7 +221,7 @@ struct fman_mac { bool allmulti_enabled; }; -static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr) +static void set_mac_address(struct tgec_regs __iomem *regs, const u8 *adr) { u32 tmp0, tmp1; @@ -514,13 +514,13 @@ int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en) return 0; } -int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr) +int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr) { if (!is_init_done(tgec->cfg)) return -EINVAL; tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr); - set_mac_address(tgec->regs, (u8 *)(*p_enet_addr)); + set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr)); return 0; } @@ -704,7 +704,7 @@ int tgec_init(struct fman_mac *tgec) if (tgec->addr) { MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr); - set_mac_address(tgec->regs, (u8 *)eth_addr); + set_mac_address(tgec->regs, (const u8 *)eth_addr); } /* interrupts */ diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h index 3bfd1062b386..b28b20b26148 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.h +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h @@ -37,7 +37,7 @@ struct fman_mac *tgec_config(struct fman_mac_params *params); int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val); -int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr); +int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr); int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val); int tgec_enable(struct fman_mac *tgec, enum comm_mode mode); int tgec_disable(struct fman_mac *tgec, enum comm_mode mode); diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index 824a81a9f350..daa285a9b8b2 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -66,7 +66,7 @@ struct mac_device { int (*stop)(struct mac_device *mac_dev); void (*adjust_link)(struct mac_device *mac_dev); int (*set_promisc)(struct fman_mac *mac_dev, bool enable); - int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr); + int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr); int (*set_allmulti)(struct fman_mac *mac_dev, bool enable); int (*set_tstamp)(struct fman_mac *mac_dev, bool enable); int (*set_multi)(struct net_device *net_dev, diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 2db6e38a772e..bacf25318f87 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1005,7 +1005,7 @@ static int fs_enet_probe(struct platform_device *ofdev) spin_lock_init(&fep->lock); spin_lock_init(&fep->tx_lock); - of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr); + of_get_ethdev_address(ofdev->dev.of_node, ndev); ret = fep->ops->allocate_bd(ndev); if (ret) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index af6ad94bf24a..acab58fd3db3 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ 
b/drivers/net/ethernet/freescale/gianfar.c @@ -753,7 +753,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) if (stash_len || stash_idx) priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; - err = of_get_mac_address(np, dev->dev_addr); + err = of_get_ethdev_address(np, dev); if (err) { eth_hw_addr_random(dev); dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 3eb288d10b0c..823221c912ab 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3205,7 +3205,7 @@ static int ucc_geth_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); /* * If device is not running, we will set mac addr register @@ -3731,7 +3731,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) goto err_free_netdev; } - of_get_mac_address(np, dev->dev_addr); + of_get_ethdev_address(np, dev); ugeth->ug_info = ug_info; ugeth->dev = device; diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index 62c0bed82ced..62600153b964 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -468,8 +468,7 @@ static int fmvj18x_config(struct pcmcia_device *link) goto failed; } /* Read MACID from CIS */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = buf[i + 5]; + eth_hw_addr_set(dev, &buf[5]); kfree(buf); } else { if (pcmcia_get_mac_from_cis(link, dev)) @@ -499,9 +498,7 @@ static int fmvj18x_config(struct pcmcia_device *link) pr_notice("unable to read hardware net address\n"); goto failed; } - for (i = 0 ; i < 6; i++) { - dev->dev_addr[i] = buggybuf[i]; - } + eth_hw_addr_set(dev, buggybuf); card_name = "FMV-J182"; break; case MBH10302: diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 92dc18a4bcc4..51ed8fe71d2d 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -30,7 +30,7 @@ #define GVE_MIN_MSIX 3 /* Numbers of gve tx/rx stats in stats report. */ -#define GVE_TX_STATS_REPORT_NUM 5 +#define GVE_TX_STATS_REPORT_NUM 6 #define GVE_RX_STATS_REPORT_NUM 2 /* Interval to schedule a stats report update, 20000ms. */ @@ -224,11 +224,6 @@ struct gve_tx_iovec { u32 iov_padding; /* padding associated with this segment */ }; -struct gve_tx_dma_buf { - DEFINE_DMA_UNMAP_ADDR(dma); - DEFINE_DMA_UNMAP_LEN(len); -}; - /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc * ring entry but only used for a pkt_desc not a seg_desc */ @@ -236,7 +231,10 @@ struct gve_tx_buffer_state { struct sk_buff *skb; /* skb for this pkt */ union { struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ - struct gve_tx_dma_buf buf; + struct { + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + }; }; }; @@ -280,7 +278,8 @@ struct gve_tx_pending_packet_dqo { * All others correspond to `skb`'s frags and should be unmapped with * `dma_unmap_page`. 
*/ - struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1]; + DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); + DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); u16 num_bufs; /* Linked list index to next element in the list, or -1 if none */ @@ -342,8 +341,8 @@ struct gve_tx_ring { union { /* GQI fields */ struct { - /* NIC tail pointer */ - __be32 last_nic_done; + /* Spinlock for when cleanup in progress */ + spinlock_t clean_lock; }; /* DQO fields. */ @@ -414,7 +413,9 @@ struct gve_tx_ring { u32 q_num ____cacheline_aligned; /* queue idx */ u32 stop_queue; /* count of queue stops */ u32 wake_queue; /* count of queue wakes */ + u32 queue_timeout; /* count of queue timeouts */ u32 ntfy_id; /* notification block index */ + u32 last_kick_msec; /* Last time the queue was kicked */ dma_addr_t bus; /* dma address of the descr ring */ dma_addr_t q_resources_bus; /* dma address of the queue resources */ dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */ @@ -822,15 +823,15 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev); bool gve_tx_poll(struct gve_notify_block *block, int budget); int gve_tx_alloc_rings(struct gve_priv *priv); void gve_tx_free_rings_gqi(struct gve_priv *priv); -__be32 gve_tx_load_event_counter(struct gve_priv *priv, - struct gve_tx_ring *tx); +u32 gve_tx_load_event_counter(struct gve_priv *priv, + struct gve_tx_ring *tx); +bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx); /* rx handling */ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx); -bool gve_rx_poll(struct gve_notify_block *block, int budget); +int gve_rx_poll(struct gve_notify_block *block, int budget); +bool gve_rx_work_pending(struct gve_rx_ring *rx); int gve_rx_alloc_rings(struct gve_priv *priv); void gve_rx_free_rings_gqi(struct gve_priv *priv); -bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, - netdev_features_t feat); /* Reset */ void gve_schedule_reset(struct gve_priv *priv); int gve_reset(struct gve_priv *priv, bool attempt_teardown); diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index f089d33dd48e..af2c1d1535f5 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -733,7 +733,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) } priv->dev->max_mtu = mtu; priv->num_event_counters = be16_to_cpu(descriptor->counters); - ether_addr_copy(priv->dev->dev_addr, descriptor->mac); + eth_hw_addr_set(priv->dev, descriptor->mac); mac = descriptor->mac; dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac); priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl); diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index 47c3d8f313fc..3953f6f7a427 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -270,6 +270,7 @@ enum gve_stat_names { TX_LAST_COMPLETION_PROCESSED = 5, RX_NEXT_EXPECTED_SEQUENCE = 6, RX_BUFFERS_POSTED = 7, + TX_TIMEOUT_CNT = 8, // stats from NIC RX_QUEUE_DROP_CNT = 65, RX_NO_BUFFERS_POSTED = 66, diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 716e6240305d..618a3e1d858e 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -330,8 +330,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = tmp_tx_bytes; data[i++] = tx->wake_queue; data[i++] = tx->stop_queue; 
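Several hunks in this section replace direct writes to netdev->dev_addr (memcpy(), ether_addr_copy(), per-byte copies) with eth_hw_addr_set(), the helper that keeps working once drivers treat dev_addr as const. A minimal sketch of the resulting ndo_set_mac_address shape; example_set_mac_address() and example_write_mac_to_hw() are hypothetical names, not taken from any driver in this diff:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical stand-in for whatever register/filter programming a real
 * driver performs with the new station address.
 */
static void example_write_mac_to_hw(struct net_device *ndev, const u8 *mac)
{
}

static int example_set_mac_address(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Core helper copies into dev->dev_addr; no direct memcpy() needed. */
        eth_hw_addr_set(ndev, addr->sa_data);
        example_write_mac_to_hw(ndev, ndev->dev_addr);

        return 0;
}

The same helper family covers the DT and firmware paths seen in these hunks: of_get_ethdev_address(np, ndev) and device_get_ethdev_address(dev, ndev) fill in dev_addr where of_get_mac_address(np, ndev->dev_addr) and device_get_mac_address() used to write into the buffer directly.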
- data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv, - tx)); + data[i++] = gve_tx_load_event_counter(priv, tx); data[i++] = tx->dma_mapping_error; /* stats from NIC */ if (skip_nic_stats) { diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index bf8a4a7c43f7..7647cd05b1d2 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -24,6 +24,9 @@ #define GVE_VERSION "1.0.0" #define GVE_VERSION_PREFIX "GVE-" +// Minimum amount of time between queue kicks in msec (10 seconds) +#define MIN_TX_TIMEOUT_GAP (1000 * 10) + const char gve_version_str[] = GVE_VERSION; static const char gve_version_prefix[] = GVE_VERSION_PREFIX; @@ -192,34 +195,40 @@ static int gve_napi_poll(struct napi_struct *napi, int budget) __be32 __iomem *irq_doorbell; bool reschedule = false; struct gve_priv *priv; + int work_done = 0; block = container_of(napi, struct gve_notify_block, napi); priv = block->priv; if (block->tx) reschedule |= gve_tx_poll(block, budget); - if (block->rx) - reschedule |= gve_rx_poll(block, budget); + if (block->rx) { + work_done = gve_rx_poll(block, budget); + reschedule |= work_done == budget; + } if (reschedule) return budget; - napi_complete(napi); - irq_doorbell = gve_irq_doorbell(priv, block); - iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell); + /* Complete processing - don't unmask irq if busy polling is enabled */ + if (likely(napi_complete_done(napi, work_done))) { + irq_doorbell = gve_irq_doorbell(priv, block); + iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell); - /* Double check we have no extra work. - * Ensure unmask synchronizes with checking for work. - */ - mb(); - if (block->tx) - reschedule |= gve_tx_poll(block, -1); - if (block->rx) - reschedule |= gve_rx_poll(block, -1); - if (reschedule && napi_reschedule(napi)) - iowrite32be(GVE_IRQ_MASK, irq_doorbell); + /* Ensure IRQ ACK is visible before we check pending work. + * If queue had issued updates, it would be truly visible. 
+ */ + mb(); - return 0; + if (block->tx) + reschedule |= gve_tx_clean_pending(priv, block->tx); + if (block->rx) + reschedule |= gve_rx_work_pending(block->rx); + + if (reschedule && napi_reschedule(napi)) + iowrite32be(GVE_IRQ_MASK, irq_doorbell); + } + return work_done; } static int gve_napi_poll_dqo(struct napi_struct *napi, int budget) @@ -279,7 +288,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv) int i, j; int err; - priv->msix_vectors = kvzalloc(num_vecs_requested * + priv->msix_vectors = kvcalloc(num_vecs_requested, sizeof(*priv->msix_vectors), GFP_KERNEL); if (!priv->msix_vectors) return -ENOMEM; @@ -640,7 +649,7 @@ static int gve_alloc_rings(struct gve_priv *priv) int err; /* Setup tx rings */ - priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx), + priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx), GFP_KERNEL); if (!priv->tx) return -ENOMEM; @@ -653,7 +662,7 @@ static int gve_alloc_rings(struct gve_priv *priv) goto free_tx; /* Setup rx rings */ - priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx), + priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx), GFP_KERNEL); if (!priv->rx) { err = -ENOMEM; @@ -776,12 +785,11 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, qpl->id = id; qpl->num_entries = 0; - qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL); + qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL); /* caller handles clean up */ if (!qpl->pages) return -ENOMEM; - qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses), - GFP_KERNEL); + qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL); /* caller handles clean up */ if (!qpl->page_buses) return -ENOMEM; @@ -840,7 +848,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) if (priv->queue_format == GVE_GQI_RDA_FORMAT) return 0; - priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL); + priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL); if (!priv->qpls) return -ENOMEM; @@ -859,7 +867,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) * sizeof(unsigned long) * BITS_PER_BYTE; - priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) * + priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls), sizeof(unsigned long), GFP_KERNEL); if (!priv->qpl_cfg.qpl_id_map) { err = -ENOMEM; @@ -1116,9 +1124,47 @@ static void gve_turnup(struct gve_priv *priv) static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) { - struct gve_priv *priv = netdev_priv(dev); + struct gve_notify_block *block; + struct gve_tx_ring *tx = NULL; + struct gve_priv *priv; + u32 last_nic_done; + u32 current_time; + u32 ntfy_idx; + netdev_info(dev, "Timeout on tx queue, %d", txqueue); + priv = netdev_priv(dev); + if (txqueue > priv->tx_cfg.num_queues) + goto reset; + + ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); + if (ntfy_idx > priv->num_ntfy_blks) + goto reset; + + block = &priv->ntfy_blocks[ntfy_idx]; + tx = block->tx; + + current_time = jiffies_to_msecs(jiffies); + if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time) + goto reset; + + /* Check to see if there are missed completions, which will allow us to + * kick the queue. + */ + last_nic_done = gve_tx_load_event_counter(priv, tx); + if (last_nic_done - tx->done) { + netdev_info(dev, "Kicking queue %d", txqueue); + iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); + napi_schedule(&block->napi); + tx->last_kick_msec = current_time; + goto out; + } // Else reset. 
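The gve_main.c hunks above rework gve_napi_poll() around napi_complete_done(): the handler now reports how much RX work was actually done, only re-arms the interrupt when NAPI really completes (so busy polling keeps it masked), and re-checks for late work after the unmask. A rough sketch of that poll shape; everything prefixed example_ is a placeholder, and the stubs merely stand in for driver-specific ring handling:

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct example_block {
        struct napi_struct napi;
        /* driver-specific rx/tx queue state would live here */
};

/* Placeholder helpers: a real driver walks its descriptor rings and pokes
 * its doorbell registers here.
 */
static int example_rx_poll(struct example_block *block, int budget) { return 0; }
static bool example_work_pending(struct example_block *block) { return false; }
static void example_unmask_irq(struct example_block *block) { }
static void example_mask_irq(struct example_block *block) { }

static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_block *block =
                container_of(napi, struct example_block, napi);
        int work_done = example_rx_poll(block, budget);

        if (work_done == budget)
                return budget;  /* budget exhausted: stay in polling mode */

        /* Don't unmask the irq if busy polling still owns this NAPI. */
        if (likely(napi_complete_done(napi, work_done))) {
                example_unmask_irq(block);
                /* Order the unmask against the pending-work check below. */
                mb();
                if (example_work_pending(block) && napi_reschedule(napi))
                        example_mask_irq(block);
        }

        return work_done;
}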
+ +reset: gve_schedule_reset(priv); + +out: + if (tx) + tx->queue_timeout++; priv->tx_timeo_cnt++; } @@ -1247,6 +1293,11 @@ void gve_handle_report_stats(struct gve_priv *priv) .value = cpu_to_be64(last_completion), .queue_id = cpu_to_be32(idx), }; + stats[stats_idx++] = (struct stats) { + .stat_name = cpu_to_be32(TX_TIMEOUT_CNT), + .value = cpu_to_be64(priv->tx[idx].queue_timeout), + .queue_id = cpu_to_be32(idx), + }; } } /* rx stats */ diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 94941d4e4744..95bc4d8a1811 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -16,19 +16,23 @@ static void gve_rx_free_buffer(struct device *dev, dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) & GVE_DATA_SLOT_ADDR_PAGE_MASK); + page_ref_sub(page_info->page, page_info->pagecnt_bias - 1); gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE); } static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx) { - if (rx->data.raw_addressing) { - u32 slots = rx->mask + 1; - int i; + u32 slots = rx->mask + 1; + int i; + if (rx->data.raw_addressing) { for (i = 0; i < slots; i++) gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], &rx->data.data_ring[i]); } else { + for (i = 0; i < slots; i++) + page_ref_sub(rx->data.page_info[i].page, + rx->data.page_info[i].pagecnt_bias - 1); gve_unassign_qpl(priv, rx->data.qpl->id); rx->data.qpl = NULL; } @@ -69,6 +73,9 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info, page_info->page_offset = 0; page_info->page_address = page_address(page); *slot_addr = cpu_to_be64(addr); + /* The page already has 1 ref */ + page_ref_add(page, INT_MAX - 1); + page_info->pagecnt_bias = INT_MAX; } static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev, @@ -295,21 +302,22 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl static bool gve_rx_can_flip_buffers(struct net_device *netdev) { - return PAGE_SIZE == 4096 + return PAGE_SIZE >= 4096 ? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false; } -static int gve_rx_can_recycle_buffer(struct page *page) +static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info) { - int pagecount = page_count(page); + int pagecount = page_count(page_info->page); /* This page is not being used by any SKBs - reuse */ - if (pagecount == 1) + if (pagecount == page_info->pagecnt_bias) return 1; /* This page is still being used by an SKB - we can't reuse */ - else if (pagecount >= 2) + else if (pagecount > page_info->pagecnt_bias) return 0; - WARN(pagecount < 1, "Pagecount should never be < 1"); + WARN(pagecount < page_info->pagecnt_bias, + "Pagecount should never be less than the bias."); return -1; } @@ -325,11 +333,11 @@ gve_rx_raw_addressing(struct device *dev, struct net_device *netdev, if (!skb) return NULL; - /* Optimistically stop the kernel from freeing the page by increasing - * the page bias. We will check the refcount in refill to determine if - * we need to alloc a new page. + /* Optimistically stop the kernel from freeing the page. + * We will check again in refill to determine if we need to alloc a + * new page. */ - get_page(page_info->page); + gve_dec_pagecnt_bias(page_info); return skb; } @@ -352,7 +360,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev, /* No point in recycling if we didn't get the skb */ if (skb) { /* Make sure that the page isn't freed. 
*/ - get_page(page_info->page); + gve_dec_pagecnt_bias(page_info); gve_rx_flip_buff(page_info, &data_slot->qpl_offset); } } else { @@ -376,8 +384,18 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, union gve_rx_data_slot *data_slot; struct sk_buff *skb = NULL; dma_addr_t page_bus; + void *va; u16 len; + /* Prefetch two packet pages ahead, we will need it soon. */ + page_info = &rx->data.page_info[(idx + 2) & rx->mask]; + va = page_info->page_address + GVE_RX_PAD + + page_info->page_offset; + + prefetch(page_info->page); /* Kernel page struct. */ + prefetch(va); /* Packet header. */ + prefetch(va + 64); /* Next cacheline too. */ + /* drop this packet */ if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) { u64_stats_update_begin(&rx->statss); @@ -408,7 +426,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, int recycle = 0; if (can_flip) { - recycle = gve_rx_can_recycle_buffer(page_info->page); + recycle = gve_rx_can_recycle_buffer(page_info); if (recycle < 0) { if (!rx->data.raw_addressing) gve_schedule_reset(priv); @@ -456,7 +474,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, return true; } -static bool gve_rx_work_pending(struct gve_rx_ring *rx) +bool gve_rx_work_pending(struct gve_rx_ring *rx) { struct gve_rx_desc *desc; __be16 flags_seq; @@ -499,7 +517,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) * owns half the page it is impossible to tell which half. Either * the whole page is free or it needs to be replaced. */ - int recycle = gve_rx_can_recycle_buffer(page_info->page); + int recycle = gve_rx_can_recycle_buffer(page_info); if (recycle < 0) { if (!rx->data.raw_addressing) @@ -514,8 +532,13 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) gve_rx_free_buffer(dev, page_info, data_slot); page_info->page = NULL; - if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot)) + if (gve_rx_alloc_buffer(priv, dev, page_info, + data_slot)) { + u64_stats_update_begin(&rx->statss); + rx->rx_buf_alloc_fail++; + u64_stats_update_end(&rx->statss); break; + } } } fill_cnt++; @@ -524,8 +547,8 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) return true; } -bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, - netdev_features_t feat) +static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget, + netdev_features_t feat) { struct gve_priv *priv = rx->gve; u32 work_done = 0, packets = 0; @@ -546,6 +569,10 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, "[%d] seqno=%d rx->desc.seqno=%d\n", rx->q_num, GVE_SEQNO(desc->flags_seq), rx->desc.seqno); + + /* prefetch two descriptors ahead */ + prefetch(rx->desc.desc_ring + ((cnt + 2) & rx->mask)); + dropped = !gve_rx(rx, desc, feat, idx); if (!dropped) { bytes += be16_to_cpu(desc->len) - GVE_RX_PAD; @@ -559,13 +586,15 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, } if (!work_done && rx->fill_cnt - cnt > rx->db_threshold) - return false; + return 0; - u64_stats_update_begin(&rx->statss); - rx->rpackets += packets; - rx->rbytes += bytes; - u64_stats_update_end(&rx->statss); - rx->cnt = cnt; + if (work_done) { + u64_stats_update_begin(&rx->statss); + rx->rpackets += packets; + rx->rbytes += bytes; + u64_stats_update_end(&rx->statss); + rx->cnt = cnt; + } /* restock ring slots */ if (!rx->data.raw_addressing) { @@ -576,26 +605,26 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, * falls below a threshold. 
*/ if (!gve_rx_refill_buffers(priv, rx)) - return false; + return 0; /* If we were not able to completely refill buffers, we'll want * to schedule this queue for work again to refill buffers. */ if (rx->fill_cnt - cnt <= rx->db_threshold) { gve_rx_write_doorbell(priv, rx); - return true; + return budget; } } gve_rx_write_doorbell(priv, rx); - return gve_rx_work_pending(rx); + return work_done; } -bool gve_rx_poll(struct gve_notify_block *block, int budget) +int gve_rx_poll(struct gve_notify_block *block, int budget) { struct gve_rx_ring *rx = block->rx; netdev_features_t feat; - bool repoll = false; + int work_done = 0; feat = block->napi.dev->features; @@ -604,8 +633,7 @@ bool gve_rx_poll(struct gve_notify_block *block, int budget) budget = INT_MAX; if (budget > 0) - repoll |= gve_clean_rx_done(rx, budget, feat); - else - repoll |= gve_rx_work_pending(rx); - return repoll; + work_done = gve_clean_rx_done(rx, budget, feat); + + return work_done; } diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 665ac795a1ad..a9cb241fedf4 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -144,7 +144,7 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx) gve_tx_remove_from_block(priv, idx); slots = tx->mask + 1; - gve_clean_tx_done(priv, tx, tx->req, false); + gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); netdev_tx_reset_queue(tx->netdev_txq); dma_free_coherent(hdev, sizeof(*tx->q_resources), @@ -176,6 +176,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) /* Make sure everything is zeroed to start */ memset(tx, 0, sizeof(*tx)); + spin_lock_init(&tx->clean_lock); tx->q_num = idx; tx->mask = slots - 1; @@ -303,15 +304,15 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info) { if (info->skb) { - dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma), - dma_unmap_len(&info->buf, len), + dma_unmap_single(dev, dma_unmap_addr(info, dma), + dma_unmap_len(info, len), DMA_TO_DEVICE); - dma_unmap_len_set(&info->buf, len, 0); + dma_unmap_len_set(info, len, 0); } else { - dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma), - dma_unmap_len(&info->buf, len), + dma_unmap_page(dev, dma_unmap_addr(info, dma), + dma_unmap_len(info, len), DMA_TO_DEVICE); - dma_unmap_len_set(&info->buf, len, 0); + dma_unmap_len_set(info, len, 0); } } @@ -328,10 +329,16 @@ static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); } +static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED); + /* Stops the queue if the skb cannot be transmitted. 
*/ -static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb) +static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx, + struct sk_buff *skb) { int bytes_required = 0; + u32 nic_done; + u32 to_do; + int ret; if (!tx->raw_addressing) bytes_required = gve_skb_fifo_bytes_required(tx, skb); @@ -339,29 +346,28 @@ static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb) if (likely(gve_can_tx(tx, bytes_required))) return 0; - /* No space, so stop the queue */ - tx->stop_queue++; - netif_tx_stop_queue(tx->netdev_txq); - smp_mb(); /* sync with restarting queue in gve_clean_tx_done() */ - - /* Now check for resources again, in case gve_clean_tx_done() freed - * resources after we checked and we stopped the queue after - * gve_clean_tx_done() checked. - * - * gve_maybe_stop_tx() gve_clean_tx_done() - * nsegs/can_alloc test failed - * gve_tx_free_fifo() - * if (tx queue stopped) - * netif_tx_queue_wake() - * netif_tx_stop_queue() - * Need to check again for space here! - */ - if (likely(!gve_can_tx(tx, bytes_required))) - return -EBUSY; + ret = -EBUSY; + spin_lock(&tx->clean_lock); + nic_done = gve_tx_load_event_counter(priv, tx); + to_do = nic_done - tx->done; - netif_tx_start_queue(tx->netdev_txq); - tx->wake_queue++; - return 0; + /* Only try to clean if there is hope for TX */ + if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) { + if (to_do > 0) { + to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT); + gve_clean_tx_done(priv, tx, to_do, false); + } + if (likely(gve_can_tx(tx, bytes_required))) + ret = 0; + } + if (ret) { + /* No space, so stop the queue */ + tx->stop_queue++; + netif_tx_stop_queue(tx->netdev_txq); + } + spin_unlock(&tx->clean_lock); + + return ret; } static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc, @@ -491,7 +497,6 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct gve_tx_buffer_state *info; bool is_gso = skb_is_gso(skb); u32 idx = tx->req & tx->mask; - struct gve_tx_dma_buf *buf; u64 addr; u32 len; int i; @@ -515,9 +520,8 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, tx->dma_mapping_error++; goto drop; } - buf = &info->buf; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); + dma_unmap_len_set(info, len, len); + dma_unmap_addr_set(info, dma, addr); payload_nfrags = shinfo->nr_frags; if (hlen < len) { @@ -549,10 +553,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, tx->dma_mapping_error++; goto unmap_drop; } - buf = &tx->info[idx].buf; tx->info[idx].skb = NULL; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); + dma_unmap_len_set(&tx->info[idx], len, len); + dma_unmap_addr_set(&tx->info[idx], dma, addr); gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); } @@ -579,7 +582,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues, "skb queue index out of range"); tx = &priv->tx[skb_get_queue_mapping(skb)]; - if (unlikely(gve_maybe_stop_tx(tx, skb))) { + if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) { /* We need to ring the txq doorbell -- we have stopped the Tx * queue for want of resources, but prior calls to gve_tx() * may have added descriptors without ringing the doorbell. 
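The gve_maybe_stop_tx() rework above drops the stop-queue-then-recheck dance: the transmit path now tries to reclaim completed descriptors itself, serialized against the NAPI cleaner by the new tx->clean_lock, and only stops the queue if that still leaves too little room. A condensed sketch of that shape; struct example_tx_ring and the example_* helpers are placeholders, not the driver's own types:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct example_tx_ring {
        spinlock_t clean_lock;          /* serializes xmit-path and NAPI cleaning */
        u32 done;                       /* completions already processed */
        u32 stop_queue;                 /* stat: number of queue stops */
        struct netdev_queue *netdev_txq;
};

/* Placeholder ring accounting; a real driver reads its event counter and
 * frees skbs/descriptors here.
 */
static unsigned int example_avail(struct example_tx_ring *tx) { return 0; }
static u32 example_completion_counter(struct example_tx_ring *tx) { return tx->done; }
static void example_clean(struct example_tx_ring *tx, u32 to_do) { }

/* Returns 0 if the packet can be queued, -EBUSY after stopping the queue. */
static int example_maybe_stop_tx(struct example_tx_ring *tx, unsigned int needed)
{
        int ret = -EBUSY;
        u32 nic_done, to_do;

        if (likely(example_avail(tx) >= needed))
                return 0;

        spin_lock(&tx->clean_lock);
        nic_done = example_completion_counter(tx);
        to_do = nic_done - tx->done;

        /* Only clean when outstanding completions could free enough room. */
        if (to_do + example_avail(tx) >= needed) {
                example_clean(tx, min_t(u32, to_do, NAPI_POLL_WEIGHT));
                if (example_avail(tx) >= needed)
                        ret = 0;
        }
        if (ret) {
                tx->stop_queue++;
                netif_tx_stop_queue(tx->netdev_txq);
        }
        spin_unlock(&tx->clean_lock);

        return ret;
}

The matching gve_tx_poll() hunk below takes the same clean_lock around gve_clean_tx_done(), so the transmit and completion paths never walk the ring concurrently.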
@@ -675,19 +678,19 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, return pkts; } -__be32 gve_tx_load_event_counter(struct gve_priv *priv, - struct gve_tx_ring *tx) +u32 gve_tx_load_event_counter(struct gve_priv *priv, + struct gve_tx_ring *tx) { - u32 counter_index = be32_to_cpu((tx->q_resources->counter_index)); + u32 counter_index = be32_to_cpu(tx->q_resources->counter_index); + __be32 counter = READ_ONCE(priv->counter_array[counter_index]); - return READ_ONCE(priv->counter_array[counter_index]); + return be32_to_cpu(counter); } bool gve_tx_poll(struct gve_notify_block *block, int budget) { struct gve_priv *priv = block->priv; struct gve_tx_ring *tx = block->tx; - bool repoll = false; u32 nic_done; u32 to_do; @@ -695,17 +698,23 @@ bool gve_tx_poll(struct gve_notify_block *block, int budget) if (budget == 0) budget = INT_MAX; + /* In TX path, it may try to clean completed pkts in order to xmit, + * to avoid cleaning conflict, use spin_lock(), it yields better + * concurrency between xmit/clean than netif's lock. + */ + spin_lock(&tx->clean_lock); /* Find out how much work there is to be done */ - tx->last_nic_done = gve_tx_load_event_counter(priv, tx); - nic_done = be32_to_cpu(tx->last_nic_done); - if (budget > 0) { - /* Do as much work as we have that the budget will - * allow - */ - to_do = min_t(u32, (nic_done - tx->done), budget); - gve_clean_tx_done(priv, tx, to_do, true); - } + nic_done = gve_tx_load_event_counter(priv, tx); + to_do = min_t(u32, (nic_done - tx->done), budget); + gve_clean_tx_done(priv, tx, to_do, true); + spin_unlock(&tx->clean_lock); /* If we still have work we want to repoll */ - repoll |= (nic_done != tx->done); - return repoll; + return nic_done != tx->done; +} + +bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx) +{ + u32 nic_done = gve_tx_load_event_counter(priv, tx); + + return nic_done != tx->done; } diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index 05ddb6a75c38..ec394d991668 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -85,18 +85,16 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx) int j; for (j = 0; j < cur_state->num_bufs; j++) { - struct gve_tx_dma_buf *buf = &cur_state->bufs[j]; - if (j == 0) { dma_unmap_single(tx->dev, - dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), - DMA_TO_DEVICE); + dma_unmap_addr(cur_state, dma[j]), + dma_unmap_len(cur_state, len[j]), + DMA_TO_DEVICE); } else { dma_unmap_page(tx->dev, - dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), - DMA_TO_DEVICE); + dma_unmap_addr(cur_state, dma[j]), + dma_unmap_len(cur_state, len[j]), + DMA_TO_DEVICE); } } if (cur_state->skb) { @@ -457,15 +455,15 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, const bool is_gso = skb_is_gso(skb); u32 desc_idx = tx->dqo_tx.tail; - struct gve_tx_pending_packet_dqo *pending_packet; + struct gve_tx_pending_packet_dqo *pkt; struct gve_tx_metadata_dqo metadata; s16 completion_tag; int i; - pending_packet = gve_alloc_pending_packet(tx); - pending_packet->skb = skb; - pending_packet->num_bufs = 0; - completion_tag = pending_packet - tx->dqo.pending_packets; + pkt = gve_alloc_pending_packet(tx); + pkt->skb = skb; + pkt->num_bufs = 0; + completion_tag = pkt - tx->dqo.pending_packets; gve_extract_tx_metadata_dqo(skb, &metadata); if (is_gso) { @@ -493,8 +491,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, /* Map the linear 
portion of skb */ { - struct gve_tx_dma_buf *buf = - &pending_packet->bufs[pending_packet->num_bufs]; u32 len = skb_headlen(skb); dma_addr_t addr; @@ -502,9 +498,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, if (unlikely(dma_mapping_error(tx->dev, addr))) goto err; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); - ++pending_packet->num_bufs; + dma_unmap_len_set(pkt, len[pkt->num_bufs], len); + dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); + ++pkt->num_bufs; gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr, completion_tag, @@ -512,8 +508,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, } for (i = 0; i < shinfo->nr_frags; i++) { - struct gve_tx_dma_buf *buf = - &pending_packet->bufs[pending_packet->num_bufs]; const skb_frag_t *frag = &shinfo->frags[i]; bool is_eop = i == (shinfo->nr_frags - 1); u32 len = skb_frag_size(frag); @@ -523,9 +517,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, if (unlikely(dma_mapping_error(tx->dev, addr))) goto err; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); - ++pending_packet->num_bufs; + dma_unmap_len_set(pkt, len[pkt->num_bufs], len); + dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); + ++pkt->num_bufs; gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr, completion_tag, is_eop, is_gso); @@ -552,22 +546,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, return 0; err: - for (i = 0; i < pending_packet->num_bufs; i++) { - struct gve_tx_dma_buf *buf = &pending_packet->bufs[i]; - + for (i = 0; i < pkt->num_bufs; i++) { if (i == 0) { - dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), + dma_unmap_single(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE); } else { - dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), DMA_TO_DEVICE); + dma_unmap_page(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), + DMA_TO_DEVICE); } } - pending_packet->skb = NULL; - pending_packet->num_bufs = 0; - gve_free_pending_packet(tx, pending_packet); + pkt->skb = NULL; + pkt->num_bufs = 0; + gve_free_pending_packet(tx, pkt); return -1; } @@ -725,12 +720,12 @@ static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list, static void remove_from_list(struct gve_tx_ring *tx, struct gve_index_list *list, - struct gve_tx_pending_packet_dqo *pending_packet) + struct gve_tx_pending_packet_dqo *pkt) { s16 prev_index, next_index; - prev_index = pending_packet->prev; - next_index = pending_packet->next; + prev_index = pkt->prev; + next_index = pkt->next; if (prev_index == -1) { /* Node is head */ @@ -747,21 +742,18 @@ static void remove_from_list(struct gve_tx_ring *tx, } static void gve_unmap_packet(struct device *dev, - struct gve_tx_pending_packet_dqo *pending_packet) + struct gve_tx_pending_packet_dqo *pkt) { - struct gve_tx_dma_buf *buf; int i; /* SKB linear portion is guaranteed to be mapped */ - buf = &pending_packet->bufs[0]; - dma_unmap_single(dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), DMA_TO_DEVICE); - for (i = 1; i < pending_packet->num_bufs; i++) { - buf = &pending_packet->bufs[i]; - dma_unmap_page(dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), DMA_TO_DEVICE); + dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]), + dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE); + for (i = 1; i < pkt->num_bufs; i++) { + dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE); } - 
pending_packet->num_bufs = 0; + pkt->num_bufs = 0; } /* Completion types and expected behavior: diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c index 93f3dcbeeea9..45ff7a9ab5f9 100644 --- a/drivers/net/ethernet/google/gve/gve_utils.c +++ b/drivers/net/ethernet/google/gve/gve_utils.c @@ -18,12 +18,16 @@ void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx) void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx) { + unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2, + num_online_cpus()); int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx); struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; struct gve_tx_ring *tx = &priv->tx[queue_idx]; block->tx = tx; tx->ntfy_id = ntfy_idx; + netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus), + queue_idx); } void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx) diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 37b605fed32c..c84ef494bd60 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -998,7 +998,7 @@ static int hip04_mac_probe(struct platform_device *pdev) hip04_config_port(ndev, SPEED_100, DUPLEX_FULL); hip04_config_fifo(priv); - eth_random_addr(ndev->dev_addr); + eth_hw_addr_random(ndev); hip04_update_mac_address(ndev); ret = hip04_alloc_ring(ndev, d); diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 22bf914f2dbd..a6c18b6527f9 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -427,7 +427,7 @@ static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv) } static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv, - unsigned char *mac) + const unsigned char *mac) { u32 reg; @@ -555,7 +555,7 @@ static int hisi_femac_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(skaddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, skaddr->sa_data); dev->addr_assign_type &= ~NET_ADDR_RANDOM; hisi_femac_set_hw_mac_addr(priv, dev->dev_addr); @@ -841,7 +841,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev) (unsigned long)phy->phy_id, phy_modes(phy->interface)); - ret = of_get_mac_address(node, ndev->dev_addr); + ret = of_get_ethdev_address(node, ndev); if (ret) { eth_hw_addr_random(ndev); dev_warn(dev, "using random MAC address %pM\n", diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index c1aae0fca5e9..d7e62eca050f 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -429,7 +429,7 @@ static void hix5hd2_port_disable(struct hix5hd2_priv *priv) static void hix5hd2_hw_set_mac_addr(struct net_device *dev) { struct hix5hd2_priv *priv = netdev_priv(dev); - unsigned char *mac = dev->dev_addr; + const unsigned char *mac = dev->dev_addr; u32 val; val = mac[1] | (mac[0] << 8); @@ -1219,7 +1219,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) goto out_phy_node; } - ret = of_get_mac_address(node, ndev->dev_addr); + ret = of_get_ethdev_address(node, ndev); if (ret) { eth_hw_addr_random(ndev); netdev_warn(ndev, "using random MAC address %pM\n", diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 2b7db1c22321..d72657444ef3 
100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -499,7 +499,7 @@ struct hnae_ae_ops { u32 *tx_usecs_high, u32 *rx_usecs_high); void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); int (*get_mac_addr)(struct hnae_handle *handle, void **p); - int (*set_mac_addr)(struct hnae_handle *handle, void *p); + int (*set_mac_addr)(struct hnae_handle *handle, const void *p); int (*add_uc_addr)(struct hnae_handle *handle, const unsigned char *addr); int (*rm_uc_addr)(struct hnae_handle *handle, @@ -558,7 +558,7 @@ struct hnae_handle { enum hnae_media_type media_type; struct list_head node; /* list to hnae_ae_dev->handle_list */ struct hnae_buf_ops *bops; /* operation for the buffer */ - struct hnae_queue **qs; /* array base of all queues */ + struct hnae_queue *qs[]; /* flexible array of all queues */ }; #define ring_to_dev(ring) ((ring)->q->dev->dev) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 75e4ec569da8..bc3e406f0139 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -81,8 +81,8 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id); qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id); - vf_cb = kzalloc(sizeof(*vf_cb) + - qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL); + vf_cb = kzalloc(struct_size(vf_cb, ae_handle.qs, qnum_per_vf), + GFP_KERNEL); if (unlikely(!vf_cb)) { dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n"); ae_handle = ERR_PTR(-ENOMEM); @@ -108,7 +108,6 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, goto vf_id_err; } - ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1); for (i = 0; i < qnum_per_vf; i++) { ae_handle->qs[i] = &ring_pair_cb->q; ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i]; @@ -207,7 +206,7 @@ static void hns_ae_fini_queue(struct hnae_queue *q) hns_rcb_reset_ring_hw(q); } -static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p) +static int hns_ae_set_mac_address(struct hnae_handle *handle, const void *p) { int ret; struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index f387a859a201..8f391e2adcc0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -450,7 +450,7 @@ static void hns_gmac_update_stats(void *mac_drv) += dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG); } -static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr) +static void hns_gmac_set_mac_addr(void *mac_drv, const char *mac_addr) { struct mac_driver *drv = (struct mac_driver *)mac_drv; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index f41379de2186..7edf8569514c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -240,7 +240,7 @@ int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num) *@addr:mac address */ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, - u32 vmid, char *addr) + u32 vmid, const char *addr) { int ret; struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 
8943ffab4418..e3bb05959ba9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -348,7 +348,7 @@ struct mac_driver { /*disable mac when disable nic or dsaf*/ void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode); /* config mac address*/ - void (*set_mac_addr)(void *mac_drv, char *mac_addr); + void (*set_mac_addr)(void *mac_drv, const char *mac_addr); /*adjust mac mode of port,include speed and duplex*/ int (*adjust_link)(void *mac_drv, enum mac_speed speed, u32 full_duplex); @@ -425,7 +425,8 @@ int hns_mac_init(struct dsaf_device *dsaf_dev); void mac_adjust_link(struct net_device *net_dev); bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex); void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); -int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); +int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, + const char *addr); int hns_mac_set_multi(struct hns_mac_cb *mac_cb, u32 port_num, char *addr, bool enable); int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index cba04bfa0b3f..5526a10caac5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -210,7 +210,7 @@ struct hnae_vf_cb { u8 port_index; struct hns_mac_cb *mac_cb; struct dsaf_device *dsaf_dev; - struct hnae_handle ae_handle; /* must be the last number */ + struct hnae_handle ae_handle; /* must be the last member */ }; struct dsaf_int_xge_src { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 401fef5f1d07..fc26ffaae620 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -255,7 +255,7 @@ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en) dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin); } -static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr) +static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, const char *mac_addr) { struct mac_driver *drv = (struct mac_driver *)mac_drv; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 343c605c4be8..22a463e15678 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1194,7 +1194,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p) return ret; } - memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, mac_addr->sa_data); return 0; } @@ -1212,7 +1212,7 @@ static void hns_init_mac_addr(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); - if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) { + if (device_get_ethdev_address(priv->dev, ndev)) { eth_hw_addr_random(ndev); dev_warn(priv->dev, "No valid mac, use random mac %pM", ndev->dev_addr); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 8ba21d6dc220..98d63e804903 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -95,6 +95,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, 
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, + HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, }; #define hnae3_dev_fd_supported(hdev) \ @@ -151,6 +152,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps) +#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; @@ -294,6 +298,7 @@ enum hnae3_dbg_cmd { HNAE3_DBG_CMD_MAC_TNL_STATUS, HNAE3_DBG_CMD_SERV_INFO, HNAE3_DBG_CMD_UMV_INFO, + HNAE3_DBG_CMD_PAGE_POOL_INFO, HNAE3_DBG_CMD_UNKNOWN, }; @@ -341,6 +346,8 @@ struct hnae3_dev_specs { u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */ u16 max_frm_size; u16 max_qset_num; + u16 umv_size; + u16 mc_mac_size; }; struct hnae3_client_ops { @@ -588,7 +595,7 @@ struct hnae3_ae_ops { u32 *tx_usecs_high, u32 *rx_usecs_high); void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); - int (*set_mac_addr)(struct hnae3_handle *handle, void *p, + int (*set_mac_addr)(struct hnae3_handle *handle, const void *p, bool is_first); int (*do_ioctl)(struct hnae3_handle *handle, struct ifreq *ifr, int cmd); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 2b66c59f5eaf..b26d43c9c088 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -336,6 +336,13 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { .buf_len = HNS3_DBG_READ_LEN, .init = hns3_dbg_common_file_init, }, + { + .name = "page_pool_info", + .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, }; static struct hns3_dbg_cap_info hns3_dbg_cap[] = { @@ -924,6 +931,10 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos) dev_specs->max_tm_rate); *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n", dev_specs->max_qset_num); + *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n", + dev_specs->umv_size); + *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n", + dev_specs->mc_mac_size); } static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) @@ -937,6 +948,69 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) return 0; } +static const struct hns3_dbg_item page_pool_info_items[] = { + { "QUEUE_ID", 2 }, + { "ALLOCATE_CNT", 2 }, + { "FREE_CNT", 6 }, + { "POOL_SIZE(PAGE_NUM)", 2 }, + { "ORDER", 2 }, + { "NUMA_ID", 2 }, + { "MAX_LEN", 2 }, +}; + +static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring, + char **result, u32 index) +{ + u32 j = 0; + + sprintf(result[j++], "%u", index); + sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt); + sprintf(result[j++], "%u", + atomic_read(&ring->page_pool->pages_state_release_cnt)); + sprintf(result[j++], "%u", ring->page_pool->p.pool_size); + sprintf(result[j++], "%u", ring->page_pool->p.order); + sprintf(result[j++], "%d", ring->page_pool->p.nid); + sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024); +} + +static int +hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len) +{ + char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(page_pool_info_items)]; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + int pos = 0; + u32 i; + + if (!priv->ring) { + 
dev_err(&h->pdev->dev, "priv->ring is NULL\n"); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items, + NULL, ARRAY_SIZE(page_pool_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; + hns3_dump_page_pool_info(ring, result, i); + hns3_dbg_fill_content(content, sizeof(content), + page_pool_info_items, + (const char **)result, + ARRAY_SIZE(page_pool_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index) { u32 i; @@ -978,6 +1052,10 @@ static const struct hns3_dbg_func hns3_dbg_cmd_func[] = { .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO, .dbg_dump = hns3_dbg_tx_queue_info, }, + { + .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO, + .dbg_dump = hns3_dbg_page_pool_info, + }, }; static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 468b8f07bf47..fea1be4c02ed 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2287,7 +2287,7 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return ret; } - ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); + eth_hw_addr_set(netdev, mac_addr->sa_data); return 0; } @@ -4933,7 +4933,7 @@ static int hns3_init_mac_addr(struct net_device *netdev) dev_warn(priv->dev, "using random MAC address %pM\n", netdev->dev_addr); } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { - ether_addr_copy(netdev->dev_addr, mac_addr_temp); + eth_hw_addr_set(netdev, mac_addr_temp); ether_addr_copy(netdev->perm_addr, mac_addr_temp); } else { return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 33244472e0d0..bfcfefa9d2b5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -1188,7 +1188,10 @@ struct hclge_dev_specs_1_cmd { __le16 max_frm_size; __le16 max_qset_num; __le16 max_int_gl; - u8 rsv1[18]; + u8 rsv0[2]; + __le16 umv_size; + __le16 mc_mac_size; + u8 rsv1[12]; }; /* mac speed type defined in firmware command */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 32f62cd2dd99..f0aa4fbd2200 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1992,6 +1992,9 @@ static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len) } mutex_unlock(&hdev->vport_lock); + pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n", + hdev->used_mc_mac_num); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c index e4aad695abcc..4c441e6a5082 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c @@ -109,7 +109,6 @@ int hclge_devlink_init(struct hclge_dev *hdev) struct pci_dev *pdev = 
hdev->pdev; struct hclge_devlink_priv *priv; struct devlink *devlink; - int ret; devlink = devlink_alloc(&hclge_devlink_ops, sizeof(struct hclge_devlink_priv), &pdev->dev); @@ -120,28 +119,15 @@ int hclge_devlink_init(struct hclge_dev *hdev) priv->hdev = hdev; hdev->devlink = devlink; - ret = devlink_register(devlink); - if (ret) { - dev_err(&pdev->dev, "failed to register devlink, ret = %d\n", - ret); - goto out_reg_fail; - } - - devlink_reload_enable(devlink); - + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); return 0; - -out_reg_fail: - devlink_free(devlink); - return ret; } void hclge_devlink_uninit(struct hclge_dev *hdev) { struct devlink *devlink = hdev->devlink; - devlink_reload_disable(devlink); - devlink_unregister(devlink); devlink_free(devlink); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f5b8d1fee0f1..7bb34af3981e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1342,8 +1342,6 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), HCLGE_CFG_UMV_TBL_SPACE_M, HCLGE_CFG_UMV_TBL_SPACE_S); - if (!cfg->umv_space) - cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), HCLGE_CFG_PF_RSS_SIZE_M, @@ -1419,6 +1417,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev) ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; + ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; } static void hclge_parse_dev_specs(struct hclge_dev *hdev, @@ -1440,6 +1439,8 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev, ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); + ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); + ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); } static void hclge_check_dev_specs(struct hclge_dev *hdev) @@ -1460,6 +1461,8 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev) dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; if (!dev_specs->max_frm_size) dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; + if (!dev_specs->umv_size) + dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; } static int hclge_query_dev_specs(struct hclge_dev *hdev) @@ -1549,7 +1552,10 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; - hdev->wanted_umv_size = cfg.umv_space; + if (cfg.umv_space) + hdev->wanted_umv_size = cfg.umv_space; + else + hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; hdev->gro_en = true; if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) @@ -8498,6 +8504,9 @@ static int hclge_init_umv_space(struct hclge_dev *hdev) hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + if (hdev->ae_dev->dev_specs.mc_mac_size) + set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); + return 0; } @@ -8515,6 +8524,8 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev) hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % 
(hdev->num_alloc_vport + 1); mutex_unlock(&hdev->vport_lock); + + hdev->used_mc_mac_num = 0; } static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) @@ -8769,6 +8780,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; + bool is_new_addr = false; int status; /* mac addr check */ @@ -8782,6 +8794,13 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (status) { + if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && + hdev->used_mc_mac_num >= + hdev->ae_dev->dev_specs.mc_mac_size) + goto err_no_space; + + is_new_addr = true; + /* This mac addr do not exist, add new entry for it */ memset(desc[0].data, 0, sizeof(desc[0].data)); memset(desc[1].data, 0, sizeof(desc[0].data)); @@ -8791,12 +8810,18 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, if (status) return status; status = hclge_add_mac_vlan_tbl(vport, &req, desc); - /* if already overflow, not to print each time */ - if (status == -ENOSPC && - !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) - dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + if (status == -ENOSPC) + goto err_no_space; + else if (!status && is_new_addr) + hdev->used_mc_mac_num++; return status; + +err_no_space: + /* if already overflow, not to print each time */ + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + return -ENOSPC; } static int hclge_rm_mc_addr(struct hnae3_handle *handle, @@ -8833,12 +8858,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, if (status) return status; - if (hclge_is_all_function_id_zero(desc)) + if (hclge_is_all_function_id_zero(desc)) { /* All the vfid is zero, so need to delete this entry */ status = hclge_remove_mac_vlan_tbl(vport, &req); - else + if (!status) + hdev->used_mc_mac_num--; + } else { /* Not all the vfid is zero, update the vfid */ status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } } else if (status == -ENOENT) { status = 0; } @@ -9414,7 +9442,7 @@ int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, return 0; } -static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, +static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, bool is_first) { const unsigned char *new_addr = (const unsigned char *)p; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index de6afbcbfbac..ca25e2edf3f0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -938,6 +938,8 @@ struct hclge_dev { u16 priv_umv_size; /* unicast mac vlan space shared by PF and its VFs */ u16 share_umv_size; + /* multicast mac address number used by PF and its VFs */ + u16 used_mc_mac_num; DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c index f478770299c6..fdc19868b818 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c @@ -110,7 +110,6 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev) struct pci_dev *pdev = hdev->pdev; struct hclgevf_devlink_priv *priv; struct 
devlink *devlink; - int ret; devlink = devlink_alloc(&hclgevf_devlink_ops, @@ -122,28 +121,15 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev) priv->hdev = hdev; hdev->devlink = devlink; - ret = devlink_register(devlink); - if (ret) { - dev_err(&pdev->dev, "failed to register devlink, ret = %d\n", - ret); - goto out_reg_fail; - } - - devlink_reload_enable(devlink); - + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); return 0; - -out_reg_fail: - devlink_free(devlink); - return ret; } void hclgevf_devlink_uninit(struct hclgevf_dev *hdev) { struct devlink *devlink = hdev->devlink; - devlink_reload_disable(devlink); - devlink_unregister(devlink); devlink_free(devlink); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 5fdac8685f95..2e6dcf75fba6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1349,7 +1349,7 @@ static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) ether_addr_copy(p, hdev->hw.mac.mac_addr); } -static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, +static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p, bool is_first) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 6e11ee339f12..60ae8bfc5f69 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -303,11 +303,11 @@ void hinic_devlink_free(struct devlink *devlink) devlink_free(devlink); } -int hinic_devlink_register(struct hinic_devlink_priv *priv) +void hinic_devlink_register(struct hinic_devlink_priv *priv) { struct devlink *devlink = priv_to_devlink(priv); - return devlink_register(devlink); + devlink_register(devlink); } void hinic_devlink_unregister(struct hinic_devlink_priv *priv) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h index 9e315011015c..46760d607b9b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h @@ -110,7 +110,7 @@ struct host_image_st { struct devlink *hinic_devlink_alloc(struct device *dev); void hinic_devlink_free(struct devlink *devlink); -int hinic_devlink_register(struct hinic_devlink_priv *priv); +void hinic_devlink_register(struct hinic_devlink_priv *priv); void hinic_devlink_unregister(struct hinic_devlink_priv *priv); int hinic_health_reporters_create(struct hinic_devlink_priv *priv); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 56b6b04e209b..657a15447bd0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -754,17 +754,9 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) return err; } - err = hinic_devlink_register(hwdev->devlink_dev); - if (err) { - dev_err(&hwif->pdev->dev, "Failed to register devlink\n"); - hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); - return err; - } - err = hinic_func_to_func_init(hwdev); if (err) { dev_err(&hwif->pdev->dev, "Failed to init mailbox\n"); - hinic_devlink_unregister(hwdev->devlink_dev); hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); return err; } @@ -787,7 +779,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) } 
hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE); - + hinic_devlink_register(hwdev->devlink_dev); return 0; } @@ -799,6 +791,7 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) { struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + hinic_devlink_unregister(hwdev->devlink_dev); hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT); if (!HINIC_IS_VF(hwdev->hwif)) { @@ -816,8 +809,6 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) hinic_func_to_func_free(hwdev); - hinic_devlink_unregister(hwdev->devlink_dev); - hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index ae707e305684..6414e922cf8c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -656,7 +656,7 @@ static int hinic_set_mac_addr(struct net_device *netdev, void *addr) err = change_mac_addr(netdev, new_mac); if (!err) - memcpy(netdev->dev_addr, new_mac, ETH_ALEN); + eth_hw_addr_set(netdev, new_mac); return err; } @@ -1379,10 +1379,8 @@ static int hinic_probe(struct pci_dev *pdev, { int err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "Failed to enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n"); err = pci_request_regions(pdev, HINIC_DRV_NAME); if (err) { diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 0696f723228a..3909c6a0af89 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -339,14 +339,13 @@ static const struct net_device_ops sun3_82586_netdev_ops = { static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) { - int i, size, retval; + int size, retval; if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME)) return -EBUSY; /* copy in the ethernet address from the prom */ - for(i = 0; i < 6 ; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; + eth_hw_addr_set(dev, idprom->id_ethaddr); printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr); @@ -461,7 +460,7 @@ static int init586(struct net_device *dev) ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST); ias_cmd->cmd_link = 0xffff; - memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN); + memcpy((char *)&ias_cmd->iaddr,(const char *) dev->dev_addr,ETH_ALEN); p->scb->cbl_offset = make16(ias_cmd); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index d5df131b183c..bad94e4d50f4 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -1741,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) goto out_free; } - memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, mac_addr->sa_data); /* Deregister old MAC in pHYP */ if (port->state == EHEA_PORT_UP) { @@ -2986,7 +2986,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, SET_NETDEV_DEV(dev, port_dev); /* initialize net_device structure */ - memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, (u8 *)&port->mac_addr); dev->netdev_ops = &ehea_netdev_ops; ehea_set_ethtool_ops(dev); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 664a91af662d..6b3fc8823c54 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1013,7 +1013,7 @@ static int 
emac_set_mac_address(struct net_device *ndev, void *sa) mutex_lock(&dev->link_lock); - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); emac_rx_disable(dev); emac_tx_disable(dev); @@ -2848,7 +2848,6 @@ static int emac_init_phy(struct emac_instance *dev) static int emac_init_config(struct emac_instance *dev) { struct device_node *np = dev->ofdev->dev.of_node; - const void *p; int err; /* Read config from device-tree */ @@ -2976,13 +2975,12 @@ static int emac_init_config(struct emac_instance *dev) } /* Read MAC-address */ - p = of_get_property(np, "local-mac-address", NULL); - if (p == NULL) { - printk(KERN_ERR "%pOF: Can't find local-mac-address property\n", - np); - return -ENXIO; + err = of_get_ethdev_address(np, dev->ndev); + if (err) { + if (err != -EPROBE_DEFER) + dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n"); + return err; } - memcpy(dev->ndev->dev_addr, p, ETH_ALEN); /* IAHT and GAHT filter parameterization */ if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 3d9b4f99d357..836617fb3f40 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -605,17 +605,13 @@ static int ibmveth_open(struct net_device *netdev) } rc = -ENOMEM; - adapter->bounce_buffer = - kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); - if (!adapter->bounce_buffer) - goto out_free_irq; - adapter->bounce_buffer_dma = - dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, - netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { - netdev_err(netdev, "unable to map bounce buffer\n"); - goto out_free_bounce_buffer; + adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev, + netdev->mtu + IBMVETH_BUFF_OH, + &adapter->bounce_buffer_dma, GFP_KERNEL); + if (!adapter->bounce_buffer) { + netdev_err(netdev, "unable to alloc bounce buffer\n"); + goto out_free_irq; } netdev_dbg(netdev, "initial replenish cycle\n"); @@ -627,8 +623,6 @@ static int ibmveth_open(struct net_device *netdev) return 0; -out_free_bounce_buffer: - kfree(adapter->bounce_buffer); out_free_irq: free_irq(netdev->irq, netdev); out_free_buffer_pools: @@ -702,10 +696,9 @@ static int ibmveth_close(struct net_device *netdev) ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]); - dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma, - adapter->netdev->mtu + IBMVETH_BUFF_OH, - DMA_BIDIRECTIONAL); - kfree(adapter->bounce_buffer); + dma_free_coherent(&adapter->vdev->dev, + adapter->netdev->mtu + IBMVETH_BUFF_OH, + adapter->bounce_buffer, adapter->bounce_buffer_dma); netdev_dbg(netdev, "close complete\n"); @@ -1620,7 +1613,7 @@ static int ibmveth_set_mac_addr(struct net_device *dev, void *p) return rc; } - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -1727,7 +1720,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netdev->min_mtu = IBMVETH_MIN_MTU; netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; - memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); + eth_hw_addr_set(netdev, mac_addr_p); if (firmware_has_feature(FW_FEATURE_CMO)) memcpy(pool_count, pool_count_cmo, sizeof(pool_count)); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6aa6ff89a765..9d61167ba767 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ 
-108,6 +108,8 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter); static int send_query_phys_parms(struct ibmvnic_adapter *adapter); static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *tx_scrq); +static void free_long_term_buff(struct ibmvnic_adapter *adapter, + struct ibmvnic_long_term_buff *ltb); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -214,22 +216,77 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, return -ETIMEDOUT; } +/** + * reuse_ltb() - Check if a long term buffer can be reused + * @ltb: The long term buffer to be checked + * @size: The size of the long term buffer. + * + * An LTB can be reused unless its size has changed. + * + * Return: Return true if the LTB can be reused, false otherwise. + */ +static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size) +{ + return (ltb->buff && ltb->size == size); +} + +/** + * alloc_long_term_buff() - Allocate a long term buffer (LTB) + * + * @adapter: ibmvnic adapter associated to the LTB + * @ltb: container object for the LTB + * @size: size of the LTB + * + * Allocate an LTB of the specified size and notify VIOS. + * + * If the given @ltb already has the correct size, reuse it. Otherwise if + * its non-NULL, free it. Then allocate a new one of the correct size. + * Notify the VIOS either way since we may now be working with a new VIOS. + * + * Allocating larger chunks of memory during resets, specially LPM or under + * low memory situations can cause resets to fail/timeout and for LPAR to + * lose connectivity. So hold onto the LTB even if we fail to communicate + * with the VIOS and reuse it on next open. Free LTB when adapter is closed. + * + * Return: 0 if we were able to allocate the LTB and notify the VIOS and + * a negative value otherwise. + */ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { struct device *dev = &adapter->vdev->dev; int rc; - ltb->size = size; - ltb->buff = dma_alloc_coherent(dev, ltb->size, <b->addr, - GFP_KERNEL); + if (!reuse_ltb(ltb, size)) { + dev_dbg(dev, + "LTB size changed from 0x%llx to 0x%x, reallocating\n", + ltb->size, size); + free_long_term_buff(adapter, ltb); + } - if (!ltb->buff) { - dev_err(dev, "Couldn't alloc long term buffer\n"); - return -ENOMEM; + if (ltb->buff) { + dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n", + ltb->map_id, ltb->size); + } else { + ltb->buff = dma_alloc_coherent(dev, size, <b->addr, + GFP_KERNEL); + if (!ltb->buff) { + dev_err(dev, "Couldn't alloc long term buffer\n"); + return -ENOMEM; + } + ltb->size = size; + + ltb->map_id = find_first_zero_bit(adapter->map_ids, + MAX_MAP_ID); + bitmap_set(adapter->map_ids, ltb->map_id, 1); + + dev_dbg(dev, + "Allocated new LTB [map %d, size 0x%llx]\n", + ltb->map_id, ltb->size); } - ltb->map_id = adapter->map_id; - adapter->map_id++; + + /* Ensure ltb is zeroed - specially when reusing it. 
*/ + memset(ltb->buff, 0, ltb->size); mutex_lock(&adapter->fw_lock); adapter->fw_done_rc = 0; @@ -243,24 +300,20 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); if (rc) { - dev_err(dev, - "Long term map request aborted or timed out,rc = %d\n", + dev_err(dev, "LTB map request aborted or timed out, rc = %d\n", rc); goto out; } if (adapter->fw_done_rc) { - dev_err(dev, "Couldn't map long term buffer,rc = %d\n", + dev_err(dev, "Couldn't map LTB, rc = %d\n", adapter->fw_done_rc); rc = -1; goto out; } rc = 0; out: - if (rc) { - dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); - ltb->buff = NULL; - } + /* don't free LTB on communication error - see function header */ mutex_unlock(&adapter->fw_lock); return rc; } @@ -281,48 +334,15 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, adapter->reset_reason != VNIC_RESET_MOBILITY && adapter->reset_reason != VNIC_RESET_TIMEOUT) send_request_unmap(adapter, ltb->map_id); + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + ltb->buff = NULL; + /* mark this map_id free */ + bitmap_clear(adapter->map_ids, ltb->map_id, 1); ltb->map_id = 0; } -static int reset_long_term_buff(struct ibmvnic_adapter *adapter, - struct ibmvnic_long_term_buff *ltb) -{ - struct device *dev = &adapter->vdev->dev; - int rc; - - memset(ltb->buff, 0, ltb->size); - - mutex_lock(&adapter->fw_lock); - adapter->fw_done_rc = 0; - - reinit_completion(&adapter->fw_done); - rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); - if (rc) { - mutex_unlock(&adapter->fw_lock); - return rc; - } - - rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); - if (rc) { - dev_info(dev, - "Reset failed, long term map request timed out or aborted\n"); - mutex_unlock(&adapter->fw_lock); - return rc; - } - - if (adapter->fw_done_rc) { - dev_info(dev, - "Reset failed, attempting to free and reallocate buffer\n"); - free_long_term_buff(adapter, ltb); - mutex_unlock(&adapter->fw_lock); - return alloc_long_term_buff(adapter, ltb, ltb->size); - } - mutex_unlock(&adapter->fw_lock); - return 0; -} - static void deactivate_rx_pools(struct ibmvnic_adapter *adapter) { int i; @@ -363,31 +383,41 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, * be 0. */ for (i = ind_bufp->index; i < count; ++i) { - skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); + index = pool->free_map[pool->next_free]; + + /* We maybe reusing the skb from earlier resets. Allocate + * only if necessary. But since the LTB may have changed + * during reset (see init_rx_pools()), update LTB below + * even if reusing skb. 
+ */ + skb = pool->rx_buff[index].skb; if (!skb) { - dev_err(dev, "Couldn't replenish rx buff\n"); - adapter->replenish_no_mem++; - break; + skb = netdev_alloc_skb(adapter->netdev, + pool->buff_size); + if (!skb) { + dev_err(dev, "Couldn't replenish rx buff\n"); + adapter->replenish_no_mem++; + break; + } } - index = pool->free_map[pool->next_free]; - - if (pool->rx_buff[index].skb) - dev_err(dev, "Inconsistent free_map!\n"); + pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; + pool->next_free = (pool->next_free + 1) % pool->size; /* Copy the skb to the long term mapped DMA buffer */ offset = index * pool->buff_size; dst = pool->long_term_buff.buff + offset; memset(dst, 0, pool->buff_size); dma_addr = pool->long_term_buff.addr + offset; - pool->rx_buff[index].data = dst; - pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; + /* add the skb to an rx_buff in the pool */ + pool->rx_buff[index].data = dst; pool->rx_buff[index].dma = dma_addr; pool->rx_buff[index].skb = skb; pool->rx_buff[index].pool_index = pool->index; pool->rx_buff[index].size = pool->buff_size; + /* queue the rx_buff for the next send_subcrq_indirect */ sub_crq = &ind_bufp->indir_arr[ind_bufp->index++]; memset(sub_crq, 0, sizeof(*sub_crq)); sub_crq->rx_add.first = IBMVNIC_CRQ_CMD; @@ -405,7 +435,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, shift = 8; #endif sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift); - pool->next_free = (pool->next_free + 1) % pool->size; + + /* if send_subcrq_indirect queue is full, flush to VIOS */ if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS || i == count - 1) { lpar_rc = @@ -523,53 +554,12 @@ static int init_stats_token(struct ibmvnic_adapter *adapter) return 0; } -static int reset_rx_pools(struct ibmvnic_adapter *adapter) -{ - struct ibmvnic_rx_pool *rx_pool; - u64 buff_size; - int rx_scrqs; - int i, j, rc; - - if (!adapter->rx_pool) - return -1; - - buff_size = adapter->cur_rx_buf_sz; - rx_scrqs = adapter->num_active_rx_pools; - for (i = 0; i < rx_scrqs; i++) { - rx_pool = &adapter->rx_pool[i]; - - netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); - - if (rx_pool->buff_size != buff_size) { - free_long_term_buff(adapter, &rx_pool->long_term_buff); - rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); - rc = alloc_long_term_buff(adapter, - &rx_pool->long_term_buff, - rx_pool->size * - rx_pool->buff_size); - } else { - rc = reset_long_term_buff(adapter, - &rx_pool->long_term_buff); - } - - if (rc) - return rc; - - for (j = 0; j < rx_pool->size; j++) - rx_pool->free_map[j] = j; - - memset(rx_pool->rx_buff, 0, - rx_pool->size * sizeof(struct ibmvnic_rx_buff)); - - atomic_set(&rx_pool->available, 0); - rx_pool->next_alloc = 0; - rx_pool->next_free = 0; - rx_pool->active = 1; - } - - return 0; -} - +/** + * release_rx_pools() - Release any rx pools attached to @adapter. + * @adapter: ibmvnic adapter + * + * Safe to call this multiple times - even if no pools are attached. 
+ */ static void release_rx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_rx_pool *rx_pool; @@ -584,6 +574,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); kfree(rx_pool->free_map); + free_long_term_buff(adapter, &rx_pool->long_term_buff); if (!rx_pool->rx_buff) @@ -602,21 +593,91 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) kfree(adapter->rx_pool); adapter->rx_pool = NULL; adapter->num_active_rx_pools = 0; + adapter->prev_rx_pool_size = 0; } +/** + * reuse_rx_pools() - Check if the existing rx pools can be reused. + * @adapter: ibmvnic adapter + * + * Check if the existing rx pools in the adapter can be reused. The + * pools can be reused if the pool parameters (number of pools, + * number of buffers in the pool and size of each buffer) have not + * changed. + * + * NOTE: This assumes that all pools have the same number of buffers + * which is the case currently. If that changes, we must fix this. + * + * Return: true if the rx pools can be reused, false otherwise. + */ +static bool reuse_rx_pools(struct ibmvnic_adapter *adapter) +{ + u64 old_num_pools, new_num_pools; + u64 old_pool_size, new_pool_size; + u64 old_buff_size, new_buff_size; + + if (!adapter->rx_pool) + return false; + + old_num_pools = adapter->num_active_rx_pools; + new_num_pools = adapter->req_rx_queues; + + old_pool_size = adapter->prev_rx_pool_size; + new_pool_size = adapter->req_rx_add_entries_per_subcrq; + + old_buff_size = adapter->prev_rx_buf_sz; + new_buff_size = adapter->cur_rx_buf_sz; + + /* Require buff size to be exactly same for now */ + if (old_buff_size != new_buff_size) + return false; + + if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) + return true; + + if (old_num_pools < adapter->min_rx_queues || + old_num_pools > adapter->max_rx_queues || + old_pool_size < adapter->min_rx_add_entries_per_subcrq || + old_pool_size > adapter->max_rx_add_entries_per_subcrq) + return false; + + return true; +} + +/** + * init_rx_pools(): Initialize the set of receiver pools in the adapter. + * @netdev: net device associated with the vnic interface + * + * Initialize the set of receiver pools in the ibmvnic adapter associated + * with the net_device @netdev. If possible, reuse the existing rx pools. + * Otherwise free any existing pools and allocate a new set of pools + * before initializing them. + * + * Return: 0 on success and negative value on error. + */ static int init_rx_pools(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct device *dev = &adapter->vdev->dev; struct ibmvnic_rx_pool *rx_pool; - int rxadd_subcrqs; + u64 num_pools; + u64 pool_size; /* # of buffers in one pool */ u64 buff_size; int i, j; - rxadd_subcrqs = adapter->num_active_rx_scrqs; + pool_size = adapter->req_rx_add_entries_per_subcrq; + num_pools = adapter->req_rx_queues; buff_size = adapter->cur_rx_buf_sz; - adapter->rx_pool = kcalloc(rxadd_subcrqs, + if (reuse_rx_pools(adapter)) { + dev_dbg(dev, "Reusing rx pools\n"); + goto update_ltb; + } + + /* Allocate/populate the pools. */ + release_rx_pools(adapter); + + adapter->rx_pool = kcalloc(num_pools, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL); if (!adapter->rx_pool) { @@ -624,26 +685,27 @@ static int init_rx_pools(struct net_device *netdev) return -1; } - adapter->num_active_rx_pools = rxadd_subcrqs; + /* Set num_active_rx_pools early. If we fail below after partial + * allocation, release_rx_pools() will know how many to look for. 
+ */ + adapter->num_active_rx_pools = num_pools; - for (i = 0; i < rxadd_subcrqs; i++) { + for (i = 0; i < num_pools; i++) { rx_pool = &adapter->rx_pool[i]; netdev_dbg(adapter->netdev, "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", - i, adapter->req_rx_add_entries_per_subcrq, - buff_size); + i, pool_size, buff_size); - rx_pool->size = adapter->req_rx_add_entries_per_subcrq; + rx_pool->size = pool_size; rx_pool->index = i; rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); - rx_pool->active = 1; rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), GFP_KERNEL); if (!rx_pool->free_map) { - release_rx_pools(adapter); - return -1; + dev_err(dev, "Couldn't alloc free_map %d\n", i); + goto out_release; } rx_pool->rx_buff = kcalloc(rx_pool->size, @@ -651,69 +713,58 @@ static int init_rx_pools(struct net_device *netdev) GFP_KERNEL); if (!rx_pool->rx_buff) { dev_err(dev, "Couldn't alloc rx buffers\n"); - release_rx_pools(adapter); - return -1; + goto out_release; } - - if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, - rx_pool->size * rx_pool->buff_size)) { - release_rx_pools(adapter); - return -1; - } - - for (j = 0; j < rx_pool->size; ++j) - rx_pool->free_map[j] = j; - - atomic_set(&rx_pool->available, 0); - rx_pool->next_alloc = 0; - rx_pool->next_free = 0; } - return 0; -} - -static int reset_one_tx_pool(struct ibmvnic_adapter *adapter, - struct ibmvnic_tx_pool *tx_pool) -{ - int rc, i; - - rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); - if (rc) - return rc; - - memset(tx_pool->tx_buff, 0, - tx_pool->num_buffers * - sizeof(struct ibmvnic_tx_buff)); + adapter->prev_rx_pool_size = pool_size; + adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz; - for (i = 0; i < tx_pool->num_buffers; i++) - tx_pool->free_map[i] = i; +update_ltb: + for (i = 0; i < num_pools; i++) { + rx_pool = &adapter->rx_pool[i]; + dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", + i, rx_pool->size, rx_pool->buff_size); - tx_pool->consumer_index = 0; - tx_pool->producer_index = 0; + if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, + rx_pool->size * rx_pool->buff_size)) + goto out; - return 0; -} + for (j = 0; j < rx_pool->size; ++j) { + struct ibmvnic_rx_buff *rx_buff; -static int reset_tx_pools(struct ibmvnic_adapter *adapter) -{ - int tx_scrqs; - int i, rc; + rx_pool->free_map[j] = j; - if (!adapter->tx_pool) - return -1; + /* NOTE: Don't clear rx_buff->skb here - will leak + * memory! replenish_rx_pool() will reuse skbs or + * allocate as necessary. + */ + rx_buff = &rx_pool->rx_buff[j]; + rx_buff->dma = 0; + rx_buff->data = 0; + rx_buff->size = 0; + rx_buff->pool_index = 0; + } - tx_scrqs = adapter->num_active_tx_pools; - for (i = 0; i < tx_scrqs; i++) { - ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); - rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); - if (rc) - return rc; - rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]); - if (rc) - return rc; + /* Mark pool "empty" so replenish_rx_pools() will + * update the LTB info for each buffer + */ + atomic_set(&rx_pool->available, 0); + rx_pool->next_alloc = 0; + rx_pool->next_free = 0; + /* replenish_rx_pool() may have called deactivate_rx_pools() + * on failover. Ensure pool is active now. + */ + rx_pool->active = 1; } - return 0; +out_release: + release_rx_pools(adapter); +out: + /* We failed to allocate one or more LTBs or map them on the VIOS. + * Hold onto the pools and any LTBs that we did allocate/map. 
+ */ + return -1; } static void release_vpd_data(struct ibmvnic_adapter *adapter) @@ -735,10 +786,19 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter, free_long_term_buff(adapter, &tx_pool->long_term_buff); } +/** + * release_tx_pools() - Release any tx pools attached to @adapter. + * @adapter: ibmvnic adapter + * + * Safe to call this multiple times - even if no pools are attached. + */ static void release_tx_pools(struct ibmvnic_adapter *adapter) { int i; + /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are + * both NULL or both non-NULL. So we only need to check one. + */ if (!adapter->tx_pool) return; @@ -752,84 +812,218 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter) kfree(adapter->tso_pool); adapter->tso_pool = NULL; adapter->num_active_tx_pools = 0; + adapter->prev_tx_pool_size = 0; } static int init_one_tx_pool(struct net_device *netdev, struct ibmvnic_tx_pool *tx_pool, - int num_entries, int buf_size) + int pool_size, int buf_size) { - struct ibmvnic_adapter *adapter = netdev_priv(netdev); int i; - tx_pool->tx_buff = kcalloc(num_entries, + tx_pool->tx_buff = kcalloc(pool_size, sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); if (!tx_pool->tx_buff) return -1; - if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, - num_entries * buf_size)) - return -1; - - tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL); - if (!tx_pool->free_map) + tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); + if (!tx_pool->free_map) { + kfree(tx_pool->tx_buff); + tx_pool->tx_buff = NULL; return -1; + } - for (i = 0; i < num_entries; i++) + for (i = 0; i < pool_size; i++) tx_pool->free_map[i] = i; tx_pool->consumer_index = 0; tx_pool->producer_index = 0; - tx_pool->num_buffers = num_entries; + tx_pool->num_buffers = pool_size; tx_pool->buf_size = buf_size; return 0; } +/** + * reuse_tx_pools() - Check if the existing tx pools can be reused. + * @adapter: ibmvnic adapter + * + * Check if the existing tx pools in the adapter can be reused. The + * pools can be reused if the pool parameters (number of pools, + * number of buffers in the pool and mtu) have not changed. + * + * NOTE: This assumes that all pools have the same number of buffers + * which is the case currently. If that changes, we must fix this. + * + * Return: true if the tx pools can be reused, false otherwise. + */ +static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) +{ + u64 old_num_pools, new_num_pools; + u64 old_pool_size, new_pool_size; + u64 old_mtu, new_mtu; + + if (!adapter->tx_pool) + return false; + + old_num_pools = adapter->num_active_tx_pools; + new_num_pools = adapter->num_active_tx_scrqs; + old_pool_size = adapter->prev_tx_pool_size; + new_pool_size = adapter->req_tx_entries_per_subcrq; + old_mtu = adapter->prev_mtu; + new_mtu = adapter->req_mtu; + + /* Require MTU to be exactly same to reuse pools for now */ + if (old_mtu != new_mtu) + return false; + + if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) + return true; + + if (old_num_pools < adapter->min_tx_queues || + old_num_pools > adapter->max_tx_queues || + old_pool_size < adapter->min_tx_entries_per_subcrq || + old_pool_size > adapter->max_tx_entries_per_subcrq) + return false; + + return true; +} + +/** + * init_tx_pools(): Initialize the set of transmit pools in the adapter. + * @netdev: net device associated with the vnic interface + * + * Initialize the set of transmit pools in the ibmvnic adapter associated + * with the net_device @netdev. 
If possible, reuse the existing tx pools. + * Otherwise free any existing pools and allocate a new set of pools + * before initializing them. + * + * Return: 0 on success and negative value on error. + */ static int init_tx_pools(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int tx_subcrqs; + struct device *dev = &adapter->vdev->dev; + int num_pools; + u64 pool_size; /* # of buffers in pool */ u64 buff_size; - int i, rc; + int i, j, rc; + + num_pools = adapter->req_tx_queues; + + /* We must notify the VIOS about the LTB on all resets - but we only + * need to alloc/populate pools if either the number of buffers or + * size of each buffer in the pool has changed. + */ + if (reuse_tx_pools(adapter)) { + netdev_dbg(netdev, "Reusing tx pools\n"); + goto update_ltb; + } + + /* Allocate/populate the pools. */ + release_tx_pools(adapter); - tx_subcrqs = adapter->num_active_tx_scrqs; - adapter->tx_pool = kcalloc(tx_subcrqs, + pool_size = adapter->req_tx_entries_per_subcrq; + num_pools = adapter->num_active_tx_scrqs; + + adapter->tx_pool = kcalloc(num_pools, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); if (!adapter->tx_pool) return -1; - adapter->tso_pool = kcalloc(tx_subcrqs, + adapter->tso_pool = kcalloc(num_pools, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); + /* To simplify release_tx_pools() ensure that ->tx_pool and + * ->tso_pool are either both NULL or both non-NULL. + */ if (!adapter->tso_pool) { kfree(adapter->tx_pool); adapter->tx_pool = NULL; return -1; } - adapter->num_active_tx_pools = tx_subcrqs; + /* Set num_active_tx_pools early. If we fail below after partial + * allocation, release_tx_pools() will know how many to look for. + */ + adapter->num_active_tx_pools = num_pools; + + buff_size = adapter->req_mtu + VLAN_HLEN; + buff_size = ALIGN(buff_size, L1_CACHE_BYTES); + + for (i = 0; i < num_pools; i++) { + dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n", + i, adapter->req_tx_entries_per_subcrq, buff_size); - for (i = 0; i < tx_subcrqs; i++) { - buff_size = adapter->req_mtu + VLAN_HLEN; - buff_size = ALIGN(buff_size, L1_CACHE_BYTES); rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], - adapter->req_tx_entries_per_subcrq, - buff_size); - if (rc) { - release_tx_pools(adapter); - return rc; - } + pool_size, buff_size); + if (rc) + goto out_release; rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], IBMVNIC_TSO_BUFS, IBMVNIC_TSO_BUF_SZ); - if (rc) { - release_tx_pools(adapter); - return rc; - } + if (rc) + goto out_release; + } + + adapter->prev_tx_pool_size = pool_size; + adapter->prev_mtu = adapter->req_mtu; + +update_ltb: + /* NOTE: All tx_pools have the same number of buffers (which is + * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS + * buffers (see calls init_one_tx_pool() for these). + * For consistency, we use tx_pool->num_buffers and + * tso_pool->num_buffers below. 
+ */ + rc = -1; + for (i = 0; i < num_pools; i++) { + struct ibmvnic_tx_pool *tso_pool; + struct ibmvnic_tx_pool *tx_pool; + u32 ltb_size; + + tx_pool = &adapter->tx_pool[i]; + ltb_size = tx_pool->num_buffers * tx_pool->buf_size; + if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, + ltb_size)) + goto out; + + dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n", + i, tx_pool->long_term_buff.buff, + tx_pool->num_buffers, tx_pool->buf_size); + + tx_pool->consumer_index = 0; + tx_pool->producer_index = 0; + + for (j = 0; j < tx_pool->num_buffers; j++) + tx_pool->free_map[j] = j; + + tso_pool = &adapter->tso_pool[i]; + ltb_size = tso_pool->num_buffers * tso_pool->buf_size; + if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff, + ltb_size)) + goto out; + + dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n", + i, tso_pool->long_term_buff.buff, + tso_pool->num_buffers, tso_pool->buf_size); + + tso_pool->consumer_index = 0; + tso_pool->producer_index = 0; + + for (j = 0; j < tso_pool->num_buffers; j++) + tso_pool->free_map[j] = j; } return 0; +out_release: + release_tx_pools(adapter); +out: + /* We failed to allocate one or more LTBs or map them on the VIOS. + * Hold onto the pools and any LTBs that we did allocate/map. + */ + return rc; } static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) @@ -1020,9 +1214,6 @@ static void release_resources(struct ibmvnic_adapter *adapter) { release_vpd_data(adapter); - release_tx_pools(adapter); - release_rx_pools(adapter); - release_napi(adapter); release_login_buffer(adapter); release_login_rsp_buffer(adapter); @@ -1198,8 +1389,6 @@ static int init_resources(struct ibmvnic_adapter *adapter) return rc; } - adapter->map_id = 1; - rc = init_napi(adapter); if (rc) return rc; @@ -1296,6 +1485,8 @@ static int ibmvnic_open(struct net_device *netdev) if (rc) { netdev_err(netdev, "failed to initialize resources\n"); release_resources(adapter); + release_rx_pools(adapter); + release_tx_pools(adapter); goto out; } } @@ -1424,9 +1615,6 @@ static void ibmvnic_cleanup(struct net_device *netdev) ibmvnic_napi_disable(adapter); ibmvnic_disable_irqs(adapter); - - clean_rx_pools(adapter); - clean_tx_pools(adapter); } static int __ibmvnic_close(struct net_device *netdev) @@ -1460,6 +1648,8 @@ static int ibmvnic_close(struct net_device *netdev) rc = __ibmvnic_close(netdev); ibmvnic_cleanup(netdev); + clean_rx_pools(adapter); + clean_tx_pools(adapter); return rc; } @@ -2036,9 +2226,9 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) static int do_reset(struct ibmvnic_adapter *adapter, struct ibmvnic_rwi *rwi, u32 reset_state) { + struct net_device *netdev = adapter->netdev; u64 old_num_rx_queues, old_num_tx_queues; u64 old_num_rx_slots, old_num_tx_slots; - struct net_device *netdev = adapter->netdev; int rc; netdev_dbg(adapter->netdev, @@ -2188,8 +2378,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, !adapter->rx_pool || !adapter->tso_pool || !adapter->tx_pool) { - release_rx_pools(adapter); - release_tx_pools(adapter); release_napi(adapter); release_vpd_data(adapter); @@ -2198,16 +2386,18 @@ static int do_reset(struct ibmvnic_adapter *adapter, goto out; } else { - rc = reset_tx_pools(adapter); + rc = init_tx_pools(netdev); if (rc) { - netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", + netdev_dbg(netdev, + "init tx pools failed (%d)\n", rc); goto out; } - rc = reset_rx_pools(adapter); + rc = init_rx_pools(netdev); if (rc) { - netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", + 
netdev_dbg(netdev, + "init rx pools failed (%d)\n", rc); goto out; } @@ -4576,8 +4766,7 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq, /* crq->change_mac_addr.mac_addr is the requested one * crq->change_mac_addr_rsp.mac_addr is the returned valid one. */ - ether_addr_copy(netdev->dev_addr, - &crq->change_mac_addr_rsp.mac_addr[0]); + eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); ether_addr_copy(adapter->mac_addr, &crq->change_mac_addr_rsp.mac_addr[0]); out: @@ -4778,9 +4967,10 @@ static void handle_query_map_rsp(union ibmvnic_crq *crq, dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); return; } - netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n", - crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages, - crq->query_map_rsp.free_pages); + netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", + crq->query_map_rsp.page_size, + __be32_to_cpu(crq->query_map_rsp.tot_pages), + __be32_to_cpu(crq->query_map_rsp.free_pages)); } static void handle_query_cap_rsp(union ibmvnic_crq *crq, @@ -5527,9 +5717,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->vdev = dev; adapter->netdev = netdev; adapter->login_pending = false; + memset(&adapter->map_ids, 0, sizeof(adapter->map_ids)); + /* map_ids start at 1, so ensure map_id 0 is always "in-use" */ + bitmap_set(adapter->map_ids, 0, 1); ether_addr_copy(adapter->mac_addr, mac_addr_p); - ether_addr_copy(netdev->dev_addr, adapter->mac_addr); + eth_hw_addr_set(netdev, adapter->mac_addr); netdev->irq = dev->irq; netdev->netdev_ops = &ibmvnic_netdev_ops; netdev->ethtool_ops = &ibmvnic_ethtool_ops; @@ -5547,6 +5740,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) init_completion(&adapter->reset_done); init_completion(&adapter->stats_done); clear_bit(0, &adapter->resetting); + adapter->prev_rx_buf_sz = 0; + adapter->prev_mtu = 0; init_success = false; do { @@ -5647,6 +5842,8 @@ static void ibmvnic_remove(struct vio_dev *dev) unregister_netdevice(netdev); release_resources(adapter); + release_rx_pools(adapter); + release_tx_pools(adapter); release_sub_crqs(adapter, 1); release_crq_queue(adapter); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 22df602323bc..b8e42f67d897 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -827,7 +827,7 @@ struct ibmvnic_rx_buff { struct ibmvnic_rx_pool { struct ibmvnic_rx_buff *rx_buff; - int size; + int size; /* # of buffers in the pool */ int index; int buff_size; atomic_t available; @@ -967,6 +967,7 @@ struct ibmvnic_adapter { u64 min_mtu; u64 max_mtu; u64 req_mtu; + u64 prev_mtu; u64 max_multicast_filters; u64 vlan_header_insertion; u64 rx_vlan_header_insertion; @@ -979,13 +980,18 @@ struct ibmvnic_adapter { u64 opt_tx_entries_per_subcrq; u64 opt_rxba_entries_per_subcrq; __be64 tx_rx_desc_req; - u8 map_id; +#define MAX_MAP_ID 255 + DECLARE_BITMAP(map_ids, MAX_MAP_ID); u32 num_active_rx_scrqs; u32 num_active_rx_pools; u32 num_active_rx_napi; u32 num_active_tx_scrqs; u32 num_active_tx_pools; + + u32 prev_rx_pool_size; + u32 prev_tx_pool_size; u32 cur_rx_buf_sz; + u32 prev_rx_buf_sz; struct tasklet_struct tasklet; enum vnic_state state; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index ed8ea63bb172..0b274d8fa45b 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -313,6 +313,20 @@ config ICE To compile this driver as a 
module, choose M here. The module will be called ice. +config ICE_SWITCHDEV + bool "Switchdev Support" + default y + depends on ICE && NET_SWITCHDEV + help + Switchdev support provides internal SRIOV packet steering and switching. + + To enable it on running kernel use devlink tool: + #devlink dev eswitch set pci/0000:XX:XX.X mode switchdev + + Say Y here if you want to use Switchdev in the driver. + + If unsure, say N. + config FM10K tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" default n diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 09ae1939e6db..5039a2536951 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2259,7 +2259,7 @@ static int e100_set_mac_address(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); e100_exec_cb(nic, NULL, e100_setup_iaaddr); return 0; @@ -2921,7 +2921,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e100_phy_init(nic); - memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + eth_hw_addr_set(netdev, (u8 *)nic->eeprom); if (!is_valid_ether_addr(netdev->dev_addr)) { if (!eeprom_bad_csum_allow) { netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index bed4f040face..669060a2e6aa 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1103,7 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e_err(probe, "EEPROM Read Error\n"); } /* don't block initialization here due to bad MAC address */ - memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, hw->mac_addr); if (!is_valid_ether_addr(netdev->dev_addr)) e_err(probe, "Invalid MAC Address\n"); @@ -2209,7 +2209,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p) if (hw->mac_type == e1000_82542_rev2_0) e1000_enter_82542_rst(adapter); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); e1000_rar_set(hw, hw->mac_addr, 0); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 5b2143f4b1f8..f3424255bd2b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -21,6 +21,7 @@ #include <linux/ptp_classify.h> #include <linux/mii.h> #include <linux/mdio.h> +#include <linux/mutex.h> #include <linux/pm_qos.h> #include "hw.h" diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 900b3ab998bd..ff8672ae2b6c 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4786,7 +4786,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); @@ -7589,7 +7589,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "NVM Read Error while reading MAC 
address\n"); - memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 2fb52bd6fc0e..2cca9e84e31e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -990,7 +990,7 @@ static int fm10k_set_mac(struct net_device *dev, void *p) } if (!err) { - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); ether_addr_copy(hw->mac.addr, addr->sa_data); dev->addr_assign_type &= ~NET_ADDR_RANDOM; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index adfa2768f024..b473cb7d7c57 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -300,7 +300,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface) if (is_valid_ether_addr(hw->mac.perm_addr)) { ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr); - ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr); + eth_hw_addr_set(netdev, hw->mac.perm_addr); netdev->addr_assign_type &= ~NET_ADDR_RANDOM; } @@ -2045,7 +2045,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, netdev->addr_assign_type |= NET_ADDR_RANDOM; } - ether_addr_copy(netdev->dev_addr, hw->mac.addr); + eth_hw_addr_set(netdev, hw->mac.addr); ether_addr_copy(netdev->perm_addr, hw->mac.addr); if (!is_valid_ether_addr(netdev->perm_addr)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 39fb3d57c057..3d528fba754b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -435,7 +435,7 @@ static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch) return !!ch->fwd; } -static inline u8 *i40e_channel_mac(struct i40e_channel *ch) +static inline const u8 *i40e_channel_mac(struct i40e_channel *ch) { if (i40e_is_channel_macvlan(ch)) return ch->fwd->netdev->dev_addr; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e04b540cedc8..ba862131b9bd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1587,7 +1587,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p) */ spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, netdev->dev_addr); - ether_addr_copy(netdev->dev_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data); i40e_add_mac_filter(vsi, netdev->dev_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); @@ -13425,7 +13425,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) i40e_add_mac_filter(vsi, broadcast); spin_unlock_bh(&vsi->mac_filter_hash_lock); - ether_addr_copy(netdev->dev_addr, mac_addr); + eth_hw_addr_set(netdev, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); /* i40iw_net_event() reads 16 bytes from neigh->primary_key */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index e7e778ca074c..6f85879ba993 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -193,42 +193,40 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) { u16 ntu = rx_ring->next_to_use; union i40e_rx_desc 
*rx_desc; - struct xdp_buff **bi, *xdp; + struct xdp_buff **xdp; + u32 nb_buffs, i; dma_addr_t dma; - bool ok = true; rx_desc = I40E_RX_DESC(rx_ring, ntu); - bi = i40e_rx_bi(rx_ring, ntu); - do { - xdp = xsk_buff_alloc(rx_ring->xsk_pool); - if (!xdp) { - ok = false; - goto no_buffers; - } - *bi = xdp; - dma = xsk_buff_xdp_get_dma(xdp); + xdp = i40e_rx_bi(rx_ring, ntu); + + nb_buffs = min_t(u16, count, rx_ring->count - ntu); + nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs); + if (!nb_buffs) + return false; + + i = nb_buffs; + while (i--) { + dma = xsk_buff_xdp_get_dma(*xdp); rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc->read.hdr_addr = 0; rx_desc++; - bi++; - ntu++; - - if (unlikely(ntu == rx_ring->count)) { - rx_desc = I40E_RX_DESC(rx_ring, 0); - bi = i40e_rx_bi(rx_ring, 0); - ntu = 0; - } - } while (--count); + xdp++; + } -no_buffers: - if (rx_ring->next_to_use != ntu) { - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.qword1.status_error_len = 0; - i40e_release_rx_desc(rx_ring, ntu); + ntu += nb_buffs; + if (ntu == rx_ring->count) { + rx_desc = I40E_RX_DESC(rx_ring, 0); + xdp = i40e_rx_bi(rx_ring, 0); + ntu = 0; } - return ok; + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.qword1.status_error_len = 0; + i40e_release_rx_desc(rx_ring, ntu); + + return count == nb_buffs ? true : false; } /** @@ -365,7 +363,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) break; bi = *i40e_rx_bi(rx_ring, next_to_clean); - bi->data_end = bi->data + size; + xsk_buff_set_size(bi, size); xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool); xdp_res = i40e_run_xdp_zc(rx_ring, bi); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index cada4e0e40b4..ca4712a73574 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1847,7 +1847,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) eth_hw_addr_random(netdev); ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 3c735968e1b8..8eb8d4663a81 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1685,7 +1685,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (!v_retval) iavf_mac_add_ok(adapter); if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); break; case VIRTCHNL_OP_GET_STATS: { struct iavf_eth_stats *stats = @@ -1716,7 +1716,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { /* refresh current mac address if changed */ - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 4f538cdf42c1..c36faa7d1471 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -26,10 +26,13 @@ ice-y := ice_main.o \ ice_devlink.o \ ice_fw_update.o 
\ ice_lag.o \ - ice_ethtool.o + ice_ethtool.o \ + ice_repr.o \ + ice_tc_lib.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o +ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 3c4f08d20414..30cc748337dd 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -63,6 +63,8 @@ #include "ice_fdir.h" #include "ice_xsk.h" #include "ice_arfs.h" +#include "ice_repr.h" +#include "ice_eswitch.h" #include "ice_lag.h" #define ICE_BAR0 0 @@ -84,6 +86,7 @@ #define ICE_FDIR_MSIX 2 #define ICE_RDMA_NUM_AEQ_MSIX 4 #define ICE_MIN_RDMA_MSIX 2 +#define ICE_ESWITCH_MSIX 1 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 @@ -158,6 +161,12 @@ #define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) +enum ice_feature { + ICE_F_DSCP, + ICE_F_SMA_CTRL, + ICE_F_MAX +}; + struct ice_txq_meta { u32 q_teid; /* Tx-scheduler element identifier */ u16 q_id; /* Entry in VSI's txq_map bitmap */ @@ -306,10 +315,6 @@ struct ice_vsi { spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ atomic_t *arfs_last_fltr_id; - /* devlink port data */ - struct devlink_port devlink_port; - bool devlink_port_registered; - u16 max_frame; u16 rx_buf_len; @@ -349,6 +354,8 @@ struct ice_vsi { u16 num_xdp_txq; /* Used XDP queues */ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ + struct net_device **target_netdevs; + /* setup back reference, to which aggregator node this VSI * corresponds to */ @@ -395,6 +402,7 @@ enum ice_pf_flags { ICE_FLAG_PTP, /* PTP is enabled by software */ ICE_FLAG_AUX_ENA, ICE_FLAG_ADV_FEATURES, + ICE_FLAG_CLS_FLOWER, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, ICE_FLAG_NO_MEDIA, @@ -408,6 +416,12 @@ enum ice_pf_flags { ICE_PF_FLAGS_NBITS /* must be last */ }; +struct ice_switchdev_info { + struct ice_vsi *control_vsi; + struct ice_vsi *uplink_vsi; + bool is_running; +}; + struct ice_agg_node { u32 agg_id; #define ICE_MAX_VSIS_IN_AGG_NODE 64 @@ -421,6 +435,9 @@ struct ice_pf { struct devlink_region *nvm_region; struct devlink_region *devcaps_region; + /* devlink port data */ + struct devlink_port devlink_port; + /* OS reserved IRQ details */ struct msix_entry *msix_entries; struct ice_res_tracker *irq_tracker; @@ -434,6 +451,7 @@ struct ice_pf { struct ice_vsi **vsi; /* VSIs created by the driver */ struct ice_sw *first_sw; /* first switch created by firmware */ + u16 eswitch_mode; /* current mode of eswitch */ /* Virtchnl/SR-IOV config info */ struct ice_vf *vf; u16 num_alloc_vfs; /* actual number of VFs allocated */ @@ -443,6 +461,7 @@ struct ice_pf { /* used to ratelimit the MDD event logging */ unsigned long last_printed_mdd_jiffies; DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); + DECLARE_BITMAP(features, ICE_F_MAX); DECLARE_BITMAP(state, ICE_STATE_NBITS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ @@ -496,11 +515,15 @@ struct ice_pf { int aux_idx; u32 sw_int_count; + struct hlist_head tc_flower_fltr_list; + __le64 nvm_phy_type_lo; /* NVM PHY type low */ __le64 nvm_phy_type_hi; /* NVM PHY type high */ struct ice_link_default_override_tlv link_dflt_override; struct ice_lag *lag; /* Link 
Aggregation information */ + struct ice_switchdev_info switchdev; + #define ICE_INVALID_AGG_NODE_ID 0 #define ICE_PF_AGG_NODE_ID_START 1 #define ICE_MAX_PF_AGG_NODES 32 @@ -512,6 +535,7 @@ struct ice_pf { struct ice_netdev_priv { struct ice_vsi *vsi; + struct ice_repr *repr; }; /** @@ -597,6 +621,19 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) } /** + * ice_get_netdev_priv_vsi - return VSI associated with netdev priv. + * @np: private netdev structure + */ +static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np) +{ + /* In case of port representor return source port VSI. */ + if (np->repr) + return np->repr->src_vsi; + else + return np->vsi; +} + +/** * ice_get_ctrl_vsi - Get the control VSI * @pf: PF instance */ @@ -610,6 +647,18 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf) } /** + * ice_is_switchdev_running - check if switchdev is configured + * @pf: pointer to PF structure + * + * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV + * and switchdev is configured, false otherwise. + */ +static inline bool ice_is_switchdev_running(struct ice_pf *pf) +{ + return pf->switchdev.is_running; +} + +/** * ice_set_sriov_cap - enable SRIOV in PF flags * @pf: PF struct */ @@ -637,7 +686,9 @@ bool netif_is_ice(struct net_device *dev); int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); int ice_vsi_open_ctrl(struct ice_vsi *vsi); +int ice_vsi_open(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); +void ice_set_ethtool_repr_ops(struct net_device *netdev); void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); u16 ice_get_avail_txq_count(struct ice_pf *pf); u16 ice_get_avail_rxq_count(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 21b4c7cd6f05..a5425f0dce3f 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -233,6 +233,7 @@ struct ice_aqc_get_sw_cfg_resp_elem { */ #define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03 #define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04 +#define ICE_AQC_RES_TYPE_RECIPE 0x05 #define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21 #define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22 #define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23 @@ -241,6 +242,7 @@ struct ice_aqc_get_sw_cfg_resp_elem { #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60 #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61 +#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7) #define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12) #define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13) @@ -474,6 +476,53 @@ struct ice_aqc_vsi_props { #define ICE_MAX_NUM_RECIPES 64 +/* Add/Get Recipe (indirect 0x0290/0x0292) */ +struct ice_aqc_add_get_recipe { + __le16 num_sub_recipes; /* Input in Add cmd, Output in Get cmd */ + __le16 return_index; /* Input, used for Get cmd only */ + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_recipe_content { + u8 rid; +#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7) +#define ICE_AQ_SW_ID_LKUP_IDX 0 + u8 lkup_indx[5]; +#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7) +#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF + __le16 mask[5]; + u8 result_indx; +#define ICE_AQ_RECIPE_RESULT_DATA_S 0 +#define ICE_AQ_RECIPE_RESULT_DATA_M (0x3F << ICE_AQ_RECIPE_RESULT_DATA_S) +#define ICE_AQ_RECIPE_RESULT_EN BIT(7) + u8 rsvd0[3]; + u8 act_ctrl_join_priority; + u8 act_ctrl_fwd_priority; + u8 act_ctrl; +#define 
ICE_AQ_RECIPE_ACT_INV_ACT BIT(2) + u8 rsvd1; + __le32 dflt_act; +}; + +struct ice_aqc_recipe_data_elem { + u8 recipe_indx; + u8 resp_bits; + u8 rsvd0[2]; + u8 recipe_bitmap[8]; + u8 rsvd1[4]; + struct ice_aqc_recipe_content content; + u8 rsvd2[20]; +}; + +/* Set/Get Recipes to Profile Association (direct 0x0291/0x0293) */ +struct ice_aqc_recipe_to_profile { + __le16 profile_id; + u8 rsvd[6]; + DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES); +}; + /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) */ struct ice_aqc_sw_rules { @@ -671,6 +720,16 @@ struct ice_aqc_sw_rules_elem { } __packed pdata; }; +/* Query PFC Mode (direct 0x0302) + * Set PFC Mode (direct 0x0303) + */ +struct ice_aqc_set_query_pfc_mode { + u8 pfc_mode; +/* For Query Command response, reserved in all other cases */ +#define ICE_AQC_PFC_VLAN_BASED_PFC 1 +#define ICE_AQC_PFC_DSCP_BASED_PFC 2 + u8 rsvd[15]; +}; /* Get Default Topology (indirect 0x0400) */ struct ice_aqc_get_topo { u8 port_num; @@ -1220,7 +1279,7 @@ struct ice_aqc_set_mac_lb { u8 reserved[15]; }; -struct ice_aqc_link_topo_addr { +struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0) @@ -1246,6 +1305,10 @@ struct ice_aqc_link_topo_addr { #define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4 #define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5 u8 index; +}; + +struct ice_aqc_link_topo_addr { + struct ice_aqc_link_topo_params topo_params; __le16 handle; #define ICE_AQC_LINK_TOPO_HANDLE_S 0 #define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) @@ -1268,6 +1331,7 @@ struct ice_aqc_link_topo_addr { struct ice_aqc_get_link_topo { struct ice_aqc_link_topo_addr addr; u8 node_part_num; +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 u8 rsvd[9]; }; @@ -1281,6 +1345,16 @@ struct ice_aqc_set_port_id_led { u8 rsvd[13]; }; +/* Set/Get GPIO (direct, 0x06EC/0x06ED) */ +struct ice_aqc_gpio { + __le16 gpio_ctrl_handle; +#define ICE_AQC_GPIO_HANDLE_S 0 +#define ICE_AQC_GPIO_HANDLE_M (0x3FF << ICE_AQC_GPIO_HANDLE_S) + u8 gpio_num; + u8 gpio_val; + u8 rsvd[12]; +}; + /* Read/Write SFF EEPROM command (indirect 0x06EE) */ struct ice_aqc_sff_eeprom { u8 lport_num; @@ -1922,10 +1996,13 @@ struct ice_aq_desc { struct ice_aqc_get_phy_caps get_phy; struct ice_aqc_set_phy_cfg set_phy; struct ice_aqc_restart_an restart_an; + struct ice_aqc_gpio read_write_gpio; struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led set_port_id_led; struct ice_aqc_get_sw_cfg get_sw_conf; struct ice_aqc_sw_rules sw_rules; + struct ice_aqc_add_get_recipe add_get_recipe; + struct ice_aqc_recipe_to_profile recipe_to_profile; struct ice_aqc_get_topo get_topo; struct ice_aqc_sched_elem_cmd sched_elem_cmd; struct ice_aqc_query_txsched_res query_sched_res; @@ -1936,6 +2013,7 @@ struct ice_aq_desc { struct ice_aqc_nvm_pkg_data pkg_data; struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl; struct ice_aqc_pf_vf_msg virt; + struct ice_aqc_set_query_pfc_mode set_query_pfc_mode; struct ice_aqc_lldp_get_mib lldp_get_mib; struct ice_aqc_lldp_set_mib_change lldp_set_event; struct ice_aqc_lldp_stop lldp_stop; @@ -2033,6 +2111,12 @@ enum ice_adminq_opc { ice_aqc_opc_update_vsi = 0x0211, ice_aqc_opc_free_vsi = 0x0213, + /* recipe commands */ + ice_aqc_opc_add_recipe = 0x0290, + ice_aqc_opc_recipe_to_profile = 0x0291, + ice_aqc_opc_get_recipe = 0x0292, + ice_aqc_opc_get_recipe_to_profile = 0x0293, + /* switch rules population commands */ ice_aqc_opc_add_sw_rules = 0x02A0, ice_aqc_opc_update_sw_rules = 0x02A1, 
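The new recipe opcodes above (0x0290-0x0293) pair with the ice_aqc_add_get_recipe descriptor and the ice_aqc_recipe_data_elem buffer added earlier in this header. Below is a minimal sketch of how the add-recipe opcode might be issued as an indirect admin queue command, following the ice_fill_dflt_direct_cmd_desc()/ice_aq_send_cmd() pattern used by the GPIO helpers later in this patch; the helper name example_aq_add_recipe is a hypothetical illustration, not the driver's actual implementation.

/*
 * Illustrative sketch only: one way the new add-recipe opcode (0x0290) could
 * be driven through the admin queue as an indirect command carrying an array
 * of struct ice_aqc_recipe_data_elem. The helper name is hypothetical; the
 * descriptor-fill/send pattern mirrors ice_aq_set_gpio()/ice_aq_get_gpio()
 * shown further down in this series. ICE_AQ_FLAG_RD marks the buffer as
 * host-to-firmware data.
 */
static enum ice_status
example_aq_add_recipe(struct ice_hw *hw,
		      struct ice_aqc_recipe_data_elem *s_recipe_list,
		      u16 num_recipes, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	buf_size = num_recipes * sizeof(*s_recipe_list);

	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
}

The recipe index firmware reports back in the response buffer would then typically be tied to a packet-processing profile via the 0x0291 recipe-to-profile command and its recipe_assoc bitmap declared above.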
@@ -2040,6 +2124,10 @@ enum ice_adminq_opc { ice_aqc_opc_clear_pf_cfg = 0x02A4, + /* DCB commands */ + ice_aqc_opc_query_pfc_mode = 0x0302, + ice_aqc_opc_set_pfc_mode = 0x0303, + /* transmit scheduler commands */ ice_aqc_opc_get_dflt_topo = 0x0400, ice_aqc_opc_add_sched_elems = 0x0401, @@ -2064,6 +2152,8 @@ enum ice_adminq_opc { ice_aqc_opc_set_mac_lb = 0x0620, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_set_port_id_led = 0x06E9, + ice_aqc_opc_set_gpio = 0x06EC, + ice_aqc_opc_get_gpio = 0x06ED, ice_aqc_opc_sff_eeprom = 0x06EE, /* NVM commands */ diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 88d98c9e5f91..3071b8e79499 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -513,7 +513,7 @@ void ice_init_arfs(struct ice_vsi *vsi) if (!vsi || vsi->type != ICE_VSI_PF) return; - arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST, + arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), GFP_KERNEL); if (!arfs_fltr_list) return; diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index c36057efc7ae..d7a5ac9346bc 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -218,6 +218,30 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) } /** + * ice_eswitch_calc_q_handle + * @ring: pointer to ring which unique index is needed + * + * To correctly work with many netdevs ring->q_index of Tx rings on switchdev + * VSI can repeat. Hardware ring setup requires unique q_index. Calculate it + * here by finding index in vsi->tx_rings of this ring. + * + * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen, + * because VSI is get from ring->vsi, so it has to be present in this VSI. + */ +static u16 ice_eswitch_calc_q_handle(struct ice_ring *ring) +{ + struct ice_vsi *vsi = ring->vsi; + int i; + + ice_for_each_txq(vsi, i) { + if (vsi->tx_rings[i] == ring) + return i; + } + + return ICE_INVAL_Q_INDEX; +} + +/** * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring * @ring: The Tx ring to configure * @@ -280,6 +304,9 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; break; + case ICE_VSI_SWITCHDEV_CTRL: + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; + break; default: return; } @@ -746,7 +773,14 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, /* Add unique software queue handle of the Tx queue per * TC into the VSI Tx ring */ - ring->q_handle = ice_calc_q_handle(vsi, ring, tc); + if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { + ring->q_handle = ice_eswitch_calc_q_handle(ring); + + if (ring->q_handle == ICE_INVAL_Q_INDEX) + return -ENODEV; + } else { + ring->q_handle = ice_calc_q_handle(vsi, ring, tc); + } status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, 1, qg_buf, buf_len, NULL); diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2fb81e359cdf..16a25616cdc3 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -70,6 +70,27 @@ bool ice_is_e810(struct ice_hw *hw) } /** + * ice_is_e810t + * @hw: pointer to the hardware structure + * + * returns true if the device is E810T based, false if not. 
+ */ +bool ice_is_e810t(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E810C_SFP: + if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T || + hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2) + return true; + break; + default: + break; + } + + return false; +} + +/** * ice_clear_pf_cfg - Clear PF configuration * @hw: pointer to the hardware structure * @@ -240,11 +261,13 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); - cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << - ICE_AQC_LINK_TOPO_NODE_CTX_S); + cmd->addr.topo_params.node_type_ctx = + (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << + ICE_AQC_LINK_TOPO_NODE_CTX_S); /* set node type */ - cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); + cmd->addr.topo_params.node_type_ctx |= + (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); } @@ -568,6 +591,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) return ICE_ERR_NO_MEMORY; INIT_LIST_HEAD(&sw->vsi_list_map_head); + sw->prof_res_bm_init = 0; status = ice_init_def_sw_recp(hw); if (status) { @@ -594,17 +618,42 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) list_del(&v_pos_map->list_entry); devm_kfree(ice_hw_to_dev(hw), v_pos_map); } - recps = hw->switch_info->recp_list; - for (i = 0; i < ICE_SW_LKUP_LAST; i++) { - struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; + recps = sw->recp_list; + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + struct ice_recp_grp_entry *rg_entry, *tmprg_entry; recps[i].root_rid = i; - mutex_destroy(&recps[i].filt_rule_lock); - list_for_each_entry_safe(lst_itr, tmp_entry, - &recps[i].filt_rules, list_entry) { - list_del(&lst_itr->list_entry); - devm_kfree(ice_hw_to_dev(hw), lst_itr); + list_for_each_entry_safe(rg_entry, tmprg_entry, + &recps[i].rg_list, l_entry) { + list_del(&rg_entry->l_entry); + devm_kfree(ice_hw_to_dev(hw), rg_entry); + } + + if (recps[i].adv_rule) { + struct ice_adv_fltr_mgmt_list_entry *tmp_entry; + struct ice_adv_fltr_mgmt_list_entry *lst_itr; + + mutex_destroy(&recps[i].filt_rule_lock); + list_for_each_entry_safe(lst_itr, tmp_entry, + &recps[i].filt_rules, + list_entry) { + list_del(&lst_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups); + devm_kfree(ice_hw_to_dev(hw), lst_itr); + } + } else { + struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; + + mutex_destroy(&recps[i].filt_rule_lock); + list_for_each_entry_safe(lst_itr, tmp_entry, + &recps[i].filt_rules, + list_entry) { + list_del(&lst_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), lst_itr); + } } + if (recps[i].root_buf) + devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf); } ice_rm_all_sw_replay_rule_info(hw); devm_kfree(ice_hw_to_dev(hw), sw->recp_list); @@ -4767,6 +4816,64 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, } /** + * ice_aq_set_gpio + * @hw: pointer to the hw struct + * @gpio_ctrl_handle: GPIO controller node handle + * @pin_idx: IO Number of the GPIO that needs to be set + * @value: SW provide IO value to set in the LSB + * @cd: pointer to command details structure or NULL + * + * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology + */ +int +ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, + struct ice_sq_cd *cd) +{ + struct ice_aqc_gpio *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); + cmd = 
&desc.params.read_write_gpio; + cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); + cmd->gpio_num = pin_idx; + cmd->gpio_val = value ? 1 : 0; + + return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); +} + +/** + * ice_aq_get_gpio + * @hw: pointer to the hw struct + * @gpio_ctrl_handle: GPIO controller node handle + * @pin_idx: IO Number of the GPIO that needs to be set + * @value: IO value read + * @cd: pointer to command details structure or NULL + * + * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of + * the topology + */ +int +ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, + bool *value, struct ice_sq_cd *cd) +{ + struct ice_aqc_gpio *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); + cmd = &desc.params.read_write_gpio; + cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); + cmd->gpio_num = pin_idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (status) + return ice_status_to_errno(status); + + *value = !!cmd->gpio_val; + return 0; +} + +/** * ice_fw_supports_link_override * @hw: pointer to the hardware structure * diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index fb16070f02e2..65c1b3244264 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -183,6 +183,7 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +bool ice_is_e810t(struct ice_hw *hw); enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); @@ -192,6 +193,12 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, int ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, u32 *value, struct ice_sq_cd *cd); +int +ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, + struct ice_sq_cd *cd); +int +ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, + bool *value, struct ice_sq_cd *cd); enum ice_status ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 849fcf605479..241427cd9bc0 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. 
*/ #include "ice_common.h" +#include "ice_lib.h" #include "ice_sched.h" #include "ice_dcb.h" @@ -736,6 +737,45 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, } /** + * ice_aq_set_pfc_mode - Set PFC mode + * @hw: pointer to the HW struct + * @pfc_mode: value of PFC mode to set + * @cd: pointer to command details structure or NULL + * + * This AQ call configures the PFC mode to DSCP-based PFC mode or + * VLAN-based PFC (0x0303) + */ +int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) +{ + struct ice_aqc_set_query_pfc_mode *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC) + return -EINVAL; + + cmd = &desc.params.set_query_pfc_mode; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode); + + cmd->pfc_mode = pfc_mode; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (status) + return ice_status_to_errno(status); + + /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is + * disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has + * been executed, check if cmd->pfc_mode is what was requested. If not, + * return an error. + */ + if (cmd->pfc_mode != pfc_mode) + return -EOPNOTSUPP; + + return 0; +} + +/** * ice_cee_to_dcb_cfg * @cee_cfg: pointer to CEE configuration struct * @pi: port information structure @@ -1207,7 +1247,140 @@ ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv, } /** - * ice_add_dcb_tlv - Add all IEEE TLVs + * ice_add_dscp_up_tlv - Prepare DSCP to UP TLV + * @tlv: location to build the TLV data + * @dcbcfg: location of data to convert to TLV + */ +static void +ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + int i; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_UP_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_DSCP2UP); + tlv->ouisubtype = htonl(ouisubtype); + + /* bytes 0 - 63 - IPv4 DSCP2UP LUT */ + for (i = 0; i < ICE_DSCP_NUM_VAL; i++) { + /* IPv4 mapping */ + buf[i] = dcbcfg->dscp_map[i]; + /* IPv6 mapping */ + buf[i + ICE_DSCP_IPV6_OFFSET] = dcbcfg->dscp_map[i]; + } + + /* byte 64 - IPv4 untagged traffic */ + buf[i] = 0; + + /* byte 144 - IPv6 untagged traffic */ + buf[i + ICE_DSCP_IPV6_OFFSET] = 0; +} + +#define ICE_BYTES_PER_TC 8 +/** + * ice_add_dscp_enf_tlv - Prepare DSCP Enforcement TLV + * @tlv: location to build the TLV data + */ +static void +ice_add_dscp_enf_tlv(struct ice_lldp_org_tlv *tlv) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_ENF_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_ENFORCE); + tlv->ouisubtype = htonl(ouisubtype); + + /* Allow all DSCP values to be valid for all TC's (IPv4 and IPv6) */ + memset(buf, 0, 2 * (ICE_MAX_TRAFFIC_CLASS * ICE_BYTES_PER_TC)); +} + +/** + * ice_add_dscp_tc_bw_tlv - Prepare DSCP BW for TC TLV + * @tlv: location to build the TLV data + * @dcbcfg: location of the data to convert to TLV + */ +static void +ice_add_dscp_tc_bw_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u8 offset = 0; + u16 typelen; + int i; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_TC_BW_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = 
(u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_TCBW); + tlv->ouisubtype = htonl(ouisubtype); + + /* First Octect after subtype + * ---------------------------- + * | RSV | CBS | RSV | Max TCs | + * | 1b | 1b | 3b | 3b | + * ---------------------------- + */ + etscfg = &dcbcfg->etscfg; + buf[0] = etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M; + + /* bytes 1 - 4 reserved */ + offset = 5; + + /* TC BW table + * bytes 0 - 7 for TC 0 - 7 + * + * TSA Assignment table + * bytes 8 - 15 for TC 0 - 7 + */ + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + buf[offset] = etscfg->tcbwtable[i]; + buf[offset + ICE_MAX_TRAFFIC_CLASS] = etscfg->tsatable[i]; + offset++; + } +} + +/** + * ice_add_dscp_pfc_tlv - Prepare DSCP PFC TLV + * @tlv: Fill PFC TLV in IEEE format + * @dcbcfg: Local store which holds the PFC CFG data + */ +static void +ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_PFC_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_PFC); + tlv->ouisubtype = htonl(ouisubtype); + + buf[0] = dcbcfg->pfc.pfccap & 0xF; + buf[1] = dcbcfg->pfc.pfcena & 0xF; +} + +/** + * ice_add_dcb_tlv - Add all IEEE or DSCP TLVs * @tlv: Fill TLV data in IEEE format * @dcbcfg: Local store which holds the DCB Config * @tlvid: Type of IEEE TLV @@ -1218,21 +1391,41 @@ static void ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg, u16 tlvid) { - switch (tlvid) { - case ICE_IEEE_TLV_ID_ETS_CFG: - ice_add_ieee_ets_tlv(tlv, dcbcfg); - break; - case ICE_IEEE_TLV_ID_ETS_REC: - ice_add_ieee_etsrec_tlv(tlv, dcbcfg); - break; - case ICE_IEEE_TLV_ID_PFC_CFG: - ice_add_ieee_pfc_tlv(tlv, dcbcfg); - break; - case ICE_IEEE_TLV_ID_APP_PRI: - ice_add_ieee_app_pri_tlv(tlv, dcbcfg); - break; - default: - break; + if (dcbcfg->pfc_mode == ICE_QOS_MODE_VLAN) { + switch (tlvid) { + case ICE_IEEE_TLV_ID_ETS_CFG: + ice_add_ieee_ets_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_ETS_REC: + ice_add_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_PFC_CFG: + ice_add_ieee_pfc_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_APP_PRI: + ice_add_ieee_app_pri_tlv(tlv, dcbcfg); + break; + default: + break; + } + } else { + /* pfc_mode == ICE_QOS_MODE_DSCP */ + switch (tlvid) { + case ICE_TLV_ID_DSCP_UP: + ice_add_dscp_up_tlv(tlv, dcbcfg); + break; + case ICE_TLV_ID_DSCP_ENF: + ice_add_dscp_enf_tlv(tlv); + break; + case ICE_TLV_ID_DSCP_TC_BW: + ice_add_dscp_tc_bw_tlv(tlv, dcbcfg); + break; + case ICE_TLV_ID_DSCP_TO_PFC: + ice_add_dscp_pfc_tlv(tlv, dcbcfg); + break; + default: + break; + } } } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index d7e5e6178a21..9b6f87a889a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -22,6 +22,14 @@ #define ICE_CEE_DCBX_OUI 0x001B21 #define ICE_CEE_DCBX_TYPE 2 + +#define ICE_DSCP_OUI 0xFFFFFF +#define ICE_DSCP_SUBTYPE_DSCP2UP 0x41 +#define ICE_DSCP_SUBTYPE_ENFORCE 0x42 +#define ICE_DSCP_SUBTYPE_TCBW 0x43 +#define ICE_DSCP_SUBTYPE_PFC 0x44 +#define ICE_DSCP_IPV6_OFFSET 80 + #define ICE_CEE_SUBTYPE_PG_CFG 2 #define ICE_CEE_SUBTYPE_PFC_CFG 3 #define ICE_CEE_SUBTYPE_APP_PRI 4 @@ -78,11 +86,20 @@ #define ICE_IEEE_TLV_ID_APP_PRI 6 #define ICE_TLV_ID_END_OF_LLDPPDU 7 #define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG +#define ICE_TLV_ID_DSCP_UP 3 
+#define ICE_TLV_ID_DSCP_ENF 4 +#define ICE_TLV_ID_DSCP_TC_BW 5 +#define ICE_TLV_ID_DSCP_TO_PFC 6 #define ICE_IEEE_ETS_TLV_LEN 25 #define ICE_IEEE_PFC_TLV_LEN 6 #define ICE_IEEE_APP_TLV_LEN 11 +#define ICE_DSCP_UP_TLV_LEN 148 +#define ICE_DSCP_ENF_TLV_LEN 132 +#define ICE_DSCP_TC_BW_TLV_LEN 25 +#define ICE_DSCP_PFC_TLV_LEN 6 + /* IEEE 802.1AB LLDP Organization specific TLV */ struct ice_lldp_org_tlv { __be16 typelen; @@ -120,6 +137,7 @@ struct ice_cee_app_prio { u8 prio_map; } __packed; +int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd); enum ice_status ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 926cf748c5ec..73714685fb68 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -544,7 +544,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked) * @ets_willing: configure ETS willing * @locked: was this function called with RTNL held */ -static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) +int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) { struct ice_aqc_port_ets_elem buf = { 0 }; struct ice_dcbx_cfg *dcbcfg; @@ -683,6 +683,11 @@ void ice_pf_dcb_recfg(struct ice_pf *pf) vsi->idx); continue; } + /* no need to proceed with remaining cfg if it is switchdev + * VSI + */ + if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) + continue; ice_vsi_map_rings_to_vectors(vsi); if (vsi->type == ICE_VSI_PF) @@ -726,6 +731,11 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) /* FW LLDP is disabled, activate SW DCBX/LLDP mode */ dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); + err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC, + NULL); + if (err) + dev_info(dev, "Failed to set VLAN PFC mode\n"); + err = ice_dcb_sw_dflt_cfg(pf, true, locked); if (err) { dev_err(dev, "Failed to set local DCB config %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 261b6e2ed7bc..3dcde1750a5e 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -15,6 +15,7 @@ #define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */ void ice_dcb_rebuild(struct ice_pf *pf); +int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked); u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg); u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi); @@ -59,6 +60,12 @@ static inline bool ice_is_dcb_active(struct ice_pf *pf) return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) || test_bit(ICE_FLAG_DCB_ENA, pf->flags)); } + +static inline u8 ice_get_pfc_mode(struct ice_pf *pf) +{ + return pf->hw.port_info->qos_cfg.local_dcbx_cfg.pfc_mode; +} + #else static inline void ice_dcb_rebuild(struct ice_pf *pf) { } @@ -113,6 +120,11 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf, return false; } +static inline u8 ice_get_pfc_mode(struct ice_pf *pf) +{ + return 0; +} + static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { } static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { } static inline void ice_update_dcb_stats(struct ice_pf *pf) { } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 4180f1f35fb8..7fdeb411b6df 
100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -64,7 +64,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_dcbx_cfg *new_cfg; int bwcfg = 0, bwrec = 0; - int err, i, max_tc = 0; + int err, i; if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) @@ -80,13 +80,14 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i]; bwcfg += ets->tc_tx_bw[i]; new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i]; - new_cfg->etscfg.prio_table[i] = ets->prio_tc[i]; - if (ets->prio_tc[i] > max_tc) - max_tc = ets->prio_tc[i]; + if (new_cfg->pfc_mode == ICE_QOS_MODE_VLAN) { + /* in DSCP mode up->tc mapping cannot change */ + new_cfg->etscfg.prio_table[i] = ets->prio_tc[i]; + new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i]; + } new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i]; bwrec += ets->tc_reco_bw[i]; new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i]; - new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i]; } if (ice_dcb_bwchk(pf, new_cfg)) { @@ -94,12 +95,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) goto ets_out; } - max_tc = pf->hw.func_caps.common_cap.maxtc; - - new_cfg->etscfg.maxtcs = max_tc; - - if (!bwcfg) - new_cfg->etscfg.tcbwtable[0] = 100; + new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc; if (!bwrec) new_cfg->etsrec.tcbwtable[0] = 100; @@ -173,10 +169,13 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) pf->dcbx_cap = mode; qos_cfg = &pf->hw.port_info->qos_cfg; - if (mode & DCB_CAP_DCBX_VER_CEE) + if (mode & DCB_CAP_DCBX_VER_CEE) { + if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) + return ICE_DCB_NO_HW_CHG; qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; - else + } else { qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; + } dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode); return ICE_DCB_HW_CHG_RST; @@ -683,6 +682,8 @@ ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg, return false; } +#define ICE_BYTES_PER_DSCP_VAL 8 + /** * ice_dcbnl_setapp - set local IEEE App config * @netdev: relevant netdev struct @@ -693,42 +694,117 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app) struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_dcb_app_priority_table new_app; struct ice_dcbx_cfg *old_cfg, *new_cfg; + u8 max_tc; int ret; - if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || - !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + /* ONLY DSCP APP TLVs have operational significance */ + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) return -EINVAL; - mutex_lock(&pf->tc_mutex); + /* only allow APP TLVs in SW Mode */ + if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { + netdev_err(netdev, "can't do DSCP QoS when FW DCB agent active\n"); + return -EINVAL; + } - new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; + if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; - old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + if (!ice_is_feature_supported(pf, ICE_F_DSCP)) + return -EOPNOTSUPP; - if (old_cfg->numapps == ICE_DCBX_MAX_APPS) { - ret = -EINVAL; - goto setapp_out; + if (app->protocol >= ICE_DSCP_NUM_VAL) { + netdev_err(netdev, "DSCP value 0x%04X out of range\n", + app->protocol); + return -EINVAL; + } + + max_tc = pf->hw.func_caps.common_cap.maxtc; + if (app->priority >= max_tc) { + netdev_err(netdev, "TC %d out of range, max 
TC %d\n", + app->priority, max_tc); + return -EINVAL; } + /* grab TC mutex */ + mutex_lock(&pf->tc_mutex); + + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; + old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + ret = dcb_ieee_setapp(netdev, app); if (ret) goto setapp_out; + if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) { + netdev_err(netdev, "DSCP value 0x%04X already user mapped\n", + app->protocol); + ret = dcb_ieee_delapp(netdev, app); + if (ret) + netdev_err(netdev, "Failed to delete re-mapping TLV\n"); + ret = -EINVAL; + goto setapp_out; + } + new_app.selector = app->selector; new_app.prot_id = app->protocol; new_app.priority = app->priority; - if (ice_dcbnl_find_app(old_cfg, &new_app)) { - ret = 0; - goto setapp_out; - } + /* If port is not in DSCP mode, need to set */ + if (old_cfg->pfc_mode == ICE_QOS_MODE_VLAN) { + int i, j; + + /* set DSCP mode */ + ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC, + NULL); + if (ret) { + netdev_err(netdev, "Failed to set DSCP PFC mode %d\n", + ret); + goto setapp_out; + } + netdev_info(netdev, "Switched QoS to L3 DSCP mode\n"); + + new_cfg->pfc_mode = ICE_QOS_MODE_DSCP; + + /* set default DSCP QoS values */ + new_cfg->etscfg.willing = 0; + new_cfg->pfc.pfccap = max_tc; + new_cfg->pfc.willing = 0; + + for (i = 0; i < max_tc; i++) + for (j = 0; j < ICE_BYTES_PER_DSCP_VAL; j++) { + int dscp, offset; + + dscp = (i * max_tc) + j; + offset = max_tc * ICE_BYTES_PER_DSCP_VAL; + + new_cfg->dscp_map[dscp] = i; + /* if less that 8 TCs supported */ + if (max_tc < ICE_MAX_TRAFFIC_CLASS) + new_cfg->dscp_map[dscp + offset] = i; + } + + new_cfg->etscfg.tcbwtable[0] = 100; + new_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS; + new_cfg->etscfg.prio_table[0] = 0; + + for (i = 1; i < max_tc; i++) { + new_cfg->etscfg.tcbwtable[i] = 0; + new_cfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; + new_cfg->etscfg.prio_table[i] = i; + } + } /* end of switching to DSCP mode */ + + /* apply new mapping for this DSCP value */ + new_cfg->dscp_map[app->protocol] = app->priority; new_cfg->app[new_cfg->numapps++] = new_app; + ret = ice_pf_dcb_cfg(pf, new_cfg, true); /* return of zero indicates new cfg applied */ if (ret == ICE_DCB_HW_CHG_RST) ice_dcbnl_devreset(netdev); - if (ret == ICE_DCB_NO_HW_CHG) - ret = ICE_DCB_HW_CHG_RST; + else + ret = ICE_DCB_NO_HW_CHG; setapp_out: mutex_unlock(&pf->tc_mutex); @@ -749,22 +825,21 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) unsigned int i, j; int ret = 0; - if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) + if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { + netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n"); return -EINVAL; + } mutex_lock(&pf->tc_mutex); old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; - if (old_cfg->numapps <= 1) - goto delapp_out; - ret = dcb_ieee_delapp(netdev, app); if (ret) goto delapp_out; new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; - for (i = 1; i < new_cfg->numapps; i++) { + for (i = 0; i < new_cfg->numapps; i++) { if (app->selector == new_cfg->app[i].selector && app->protocol == new_cfg->app[i].prot_id && app->priority == new_cfg->app[i].priority) { @@ -784,17 +859,58 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) new_cfg->numapps--; for (j = i; j < new_cfg->numapps; j++) { - new_cfg->app[i].selector = old_cfg->app[j + 1].selector; - new_cfg->app[i].prot_id = old_cfg->app[j + 1].prot_id; - new_cfg->app[i].priority = old_cfg->app[j + 1].priority; + new_cfg->app[j].selector = 
old_cfg->app[j + 1].selector; + new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id; + new_cfg->app[j].priority = old_cfg->app[j + 1].priority; } - ret = ice_pf_dcb_cfg(pf, new_cfg, true); - /* return of zero indicates new cfg applied */ + /* if not a DSCP APP TLV or DSCP is not supported, we are done */ + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + !ice_is_feature_supported(pf, ICE_F_DSCP)) { + ret = ICE_DCB_HW_CHG; + goto delapp_out; + } + + /* if DSCP TLV, then need to address change in mapping */ + clear_bit(app->protocol, new_cfg->dscp_mapped); + /* remap this DSCP value to default value */ + new_cfg->dscp_map[app->protocol] = app->protocol % + ICE_BYTES_PER_DSCP_VAL; + + /* if the last DSCP mapping just got deleted, need to switch + * to L2 VLAN QoS mode + */ + if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) && + new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) { + ret = ice_aq_set_pfc_mode(&pf->hw, + ICE_AQC_PFC_VLAN_BASED_PFC, + NULL); + if (ret) { + netdev_info(netdev, "Failed to set VLAN PFC mode %d\n", + ret); + goto delapp_out; + } + netdev_info(netdev, "Switched QoS to L2 VLAN mode\n"); + + new_cfg->pfc_mode = ICE_QOS_MODE_VLAN; + + ret = ice_dcb_sw_dflt_cfg(pf, true, true); + } else { + ret = ice_pf_dcb_cfg(pf, new_cfg, true); + } + + /* return of ICE_DCB_HW_CHG_RST indicates new cfg applied + * and reset needs to be performed + */ if (ret == ICE_DCB_HW_CHG_RST) ice_dcbnl_devreset(netdev); + + /* if the change was not siginificant enough to actually call + * the reconfiguration flow, we still need to tell caller that + * their request was successfully handled + */ if (ret == ICE_DCB_NO_HW_CHG) - ret = ICE_DCB_HW_CHG_RST; + ret = ICE_DCB_HW_CHG; delapp_out: mutex_unlock(&pf->tc_mutex); diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 9d8194671f6a..8d2c39ee775b 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -21,6 +21,8 @@ #define ICE_DEV_ID_E810C_QSFP 0x1592 /* Intel(R) Ethernet Controller E810-C for SFP */ #define ICE_DEV_ID_E810C_SFP 0x1593 +#define ICE_SUBDEV_ID_E810T 0x000E +#define ICE_SUBDEV_ID_E810T2 0x000F /* Intel(R) Ethernet Controller E810-XXV for SFP */ #define ICE_DEV_ID_E810_XXV_SFP 0x159B /* Intel(R) Ethernet Connection E823-C for backplane */ diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 14afce82ef63..55353bf4cbef 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -4,6 +4,7 @@ #include "ice.h" #include "ice_lib.h" #include "ice_devlink.h" +#include "ice_eswitch.h" #include "ice_fw_update.h" /* context for devlink info version reporting */ @@ -22,7 +23,7 @@ struct ice_info_ctx { * * If a version does not exist, for example when attempting to get the * inactive version of flash when there is no pending update, the function - * should leave the buffer in the ctx structure empty and return 0. + * should leave the buffer in the ctx structure empty. 
*/ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) @@ -35,7 +36,7 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn); } -static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; enum ice_status status; @@ -45,148 +46,127 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) /* We failed to locate the PBA, so just skip this entry */ dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", ice_stat_str(status)); - - return 0; } -static int ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver, - hw->fw_patch); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch); } -static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", + hw->api_maj_ver, hw->api_min_ver); } -static int ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build); - - return 0; } -static int ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_orom_info *orom = &pf->hw.flash.orom; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", + orom->major, orom->build, orom->patch); } -static int -ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_orom_info *orom = &ctx->pending_orom; if (ctx->dev_caps.common_cap.nvm_update_pending_orom) snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch); - - return 0; } -static int ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &pf->hw.flash.nvm; snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor); - - return 0; } -static int -ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &ctx->pending_nvm; if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) - snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", + nvm->major, nvm->minor); } -static int ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &pf->hw.flash.nvm; snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack); - - return 
0; } -static int -ice_info_pending_eetrack(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &ctx->pending_nvm; if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack); - - return 0; } -static int ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name); - - return 0; } -static int ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update, - pkg->draft); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", + pkg->major, pkg->minor, pkg->update, pkg->draft); } -static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx) { snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id); - - return 0; } -static int ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &pf->hw.flash.netlist; /* The netlist version fields are BCD formatted */ - snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor, - netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev, - netlist->cust_ver); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", + netlist->major, netlist->minor, + netlist->type >> 16, netlist->type & 0xFFFF, + netlist->rev, netlist->cust_ver); } -static int ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &pf->hw.flash.netlist; snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); - - return 0; } -static int -ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &ctx->pending_netlist; @@ -194,21 +174,18 @@ ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor, - netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev, - netlist->cust_ver); - - return 0; + netlist->type >> 16, netlist->type & 0xFFFF, + netlist->rev, netlist->cust_ver); } -static int -ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &ctx->pending_netlist; if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); - - return 0; } #define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL } @@ -238,8 +215,8 @@ enum ice_version_type { static const struct ice_devlink_version { enum 
ice_version_type type; const char *key; - int (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx); - int (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx); + void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx); + void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx); } ice_devlink_versions[] = { fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba), running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt), @@ -351,24 +328,15 @@ static int ice_devlink_info_get(struct devlink *devlink, memset(ctx->buf, 0, sizeof(ctx->buf)); - err = ice_devlink_versions[i].getter(pf, ctx); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info"); - goto out_free_ctx; - } + ice_devlink_versions[i].getter(pf, ctx); /* If the default getter doesn't report a version, use the * fallback function. This is primarily useful in the case of * "stored" versions that want to report the same value as the * running version in the normal case of no pending update. */ - if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) { - err = ice_devlink_versions[i].fallback(pf, ctx); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info"); - goto out_free_ctx; - } - } + if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) + ice_devlink_versions[i].fallback(pf, ctx); /* Do not report missing versions */ if (ctx->buf[0] == '\0') @@ -456,6 +424,8 @@ ice_devlink_flash_update(struct devlink *devlink, static const struct devlink_ops ice_devlink_ops = { .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, + .eswitch_mode_get = ice_eswitch_mode_get, + .eswitch_mode_set = ice_eswitch_mode_set, .info_get = ice_devlink_info_get, .flash_update = ice_devlink_flash_update, }; @@ -498,15 +468,58 @@ struct ice_pf *ice_allocate_pf(struct device *dev) * * Return: zero on success or an error code on failure. */ -int ice_devlink_register(struct ice_pf *pf) +void ice_devlink_register(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - struct device *dev = ice_pf_to_dev(pf); + + devlink_register(devlink); +} + +/** + * ice_devlink_unregister - Unregister devlink resources for this PF. + * @pf: the PF structure to cleanup + * + * Releases resources used by devlink and cleans up associated memory. + */ +void ice_devlink_unregister(struct ice_pf *pf) +{ + devlink_unregister(priv_to_devlink(pf)); +} + +/** + * ice_devlink_create_pf_port - Create a devlink port for this PF + * @pf: the PF to create a devlink port for + * + * Create and register a devlink_port for this PF. + * + * Return: zero on success or an error code on failure. 
+ */ +int ice_devlink_create_pf_port(struct ice_pf *pf) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct device *dev; int err; - err = devlink_register(devlink); + dev = ice_pf_to_dev(pf); + + devlink_port = &pf->devlink_port; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return -EIO; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pf->hw.bus.func; + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(pf); + + err = devlink_port_register(devlink, devlink_port, vsi->idx); if (err) { - dev_err(dev, "devlink registration failed: %d\n", err); + dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", + pf->hw.pf_id, err); return err; } @@ -514,71 +527,75 @@ int ice_devlink_register(struct ice_pf *pf) } /** - * ice_devlink_unregister - Unregister devlink resources for this PF. - * @pf: the PF structure to cleanup + * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF + * @pf: the PF to cleanup * - * Releases resources used by devlink and cleans up associated memory. + * Unregisters the devlink_port structure associated with this PF. */ -void ice_devlink_unregister(struct ice_pf *pf) +void ice_devlink_destroy_pf_port(struct ice_pf *pf) { - devlink_unregister(priv_to_devlink(pf)); + struct devlink_port *devlink_port; + + devlink_port = &pf->devlink_port; + + devlink_port_type_clear(devlink_port); + devlink_port_unregister(devlink_port); } /** - * ice_devlink_create_port - Create a devlink port for this VSI - * @vsi: the VSI to create a port for + * ice_devlink_create_vf_port - Create a devlink port for this VF + * @vf: the VF to create a port for * - * Create and register a devlink_port for this VSI. + * Create and register a devlink_port for this VF. * * Return: zero on success or an error code on failure. */ -int ice_devlink_create_port(struct ice_vsi *vsi) +int ice_devlink_create_vf_port(struct ice_vf *vf) { struct devlink_port_attrs attrs = {}; - struct ice_port_info *pi; + struct devlink_port *devlink_port; struct devlink *devlink; + struct ice_vsi *vsi; struct device *dev; struct ice_pf *pf; int err; - /* Currently we only create devlink_port instances for PF VSIs */ - if (vsi->type != ICE_VSI_PF) - return -EINVAL; + pf = vf->pf; + dev = ice_pf_to_dev(pf); + vsi = ice_get_vf_vsi(vf); + devlink_port = &vf->devlink_port; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; + attrs.pci_vf.pf = pf->hw.bus.func; + attrs.pci_vf.vf = vf->vf_id; - pf = vsi->back; + devlink_port_attrs_set(devlink_port, &attrs); devlink = priv_to_devlink(pf); - dev = ice_pf_to_dev(pf); - pi = pf->hw.port_info; - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = pi->lport; - devlink_port_attrs_set(&vsi->devlink_port, &attrs); - err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx); + err = devlink_port_register(devlink, devlink_port, vsi->idx); if (err) { - dev_err(dev, "devlink_port_register failed: %d\n", err); + dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", + vf->vf_id, err); return err; } - vsi->devlink_port_registered = true; - return 0; } /** - * ice_devlink_destroy_port - Destroy the devlink_port for this VSI - * @vsi: the VSI to cleanup + * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF + * @vf: the VF to cleanup * - * Unregisters the devlink_port structure associated with this VSI. + * Unregisters the devlink_port structure associated with this VF. 
*/ -void ice_devlink_destroy_port(struct ice_vsi *vsi) +void ice_devlink_destroy_vf_port(struct ice_vf *vf) { - if (!vsi->devlink_port_registered) - return; + struct devlink_port *devlink_port; - devlink_port_type_clear(&vsi->devlink_port); - devlink_port_unregister(&vsi->devlink_port); + devlink_port = &vf->devlink_port; - vsi->devlink_port_registered = false; + devlink_port_type_clear(devlink_port); + devlink_port_unregister(devlink_port); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index e07e74426bde..b7f9551e4fc4 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -6,10 +6,12 @@ struct ice_pf *ice_allocate_pf(struct device *dev); -int ice_devlink_register(struct ice_pf *pf); +void ice_devlink_register(struct ice_pf *pf); void ice_devlink_unregister(struct ice_pf *pf); -int ice_devlink_create_port(struct ice_vsi *vsi); -void ice_devlink_destroy_port(struct ice_vsi *vsi); +int ice_devlink_create_pf_port(struct ice_pf *pf); +void ice_devlink_destroy_pf_port(struct ice_pf *pf); +int ice_devlink_create_vf_port(struct ice_vf *vf); +void ice_devlink_destroy_vf_port(struct ice_vf *vf); void ice_devlink_init_regions(struct ice_pf *pf); void ice_devlink_destroy_regions(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c new file mode 100644 index 000000000000..d91a7834f91f --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -0,0 +1,660 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_eswitch.h" +#include "ice_fltr.h" +#include "ice_repr.h" +#include "ice_devlink.h" +#include "ice_tc_lib.h" + +/** + * ice_eswitch_setup_env - configure switchdev HW filters + * @pf: pointer to PF struct + * + * This function adds HW filters configuration specific for switchdev + * mode. 
+ */ +static int ice_eswitch_setup_env(struct ice_pf *pf) +{ + struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_port_info *pi = pf->hw.port_info; + bool rule_added = false; + + ice_vsi_manage_vlan_stripping(ctrl_vsi, false); + + ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); + + if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI)) + goto err_def_rx; + + if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) { + if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi)) + goto err_def_rx; + rule_added = true; + } + + if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX)) + goto err_def_tx; + + if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) + goto err_override_uplink; + + if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) + goto err_override_control; + + if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id, + ICE_FLTR_TX, + ICE_SINGLE_ACT_LB_ENABLE)) + goto err_update_action; + + return 0; + +err_update_action: + ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); +err_override_control: + ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); +err_override_uplink: + ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX); +err_def_tx: + if (rule_added) + ice_clear_dflt_vsi(uplink_vsi->vsw); +err_def_rx: + ice_fltr_add_mac_and_broadcast(uplink_vsi, + uplink_vsi->port_info->mac.perm_addr, + ICE_FWD_TO_VSI); + return -ENODEV; +} + +/** + * ice_eswitch_remap_ring - reconfigure ring of switchdev ctrl VSI + * @ring: pointer to ring + * @q_vector: pointer of q_vector which is connected with this ring + * @netdev: netdevice connected with this ring + */ +static void +ice_eswitch_remap_ring(struct ice_ring *ring, struct ice_q_vector *q_vector, + struct net_device *netdev) +{ + ring->q_vector = q_vector; + ring->next = NULL; + ring->netdev = netdev; +} + +/** + * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI + * @pf: pointer to PF struct + * + * In switchdev number of allocated Tx/Rx rings is equal. + * + * This function fills q_vectors structures associated with representor and + * move each ring pairs to port representor netdevs. Each port representor + * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to + * number of VFs. + */ +static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) +{ + struct ice_vsi *vsi = pf->switchdev.control_vsi; + int q_id; + + ice_for_each_txq(vsi, q_id) { + struct ice_repr *repr = pf->vf[q_id].repr; + struct ice_q_vector *q_vector = repr->q_vector; + struct ice_ring *tx_ring = vsi->tx_rings[q_id]; + struct ice_ring *rx_ring = vsi->rx_rings[q_id]; + + q_vector->vsi = vsi; + q_vector->reg_idx = vsi->q_vectors[0]->reg_idx; + + q_vector->num_ring_tx = 1; + q_vector->tx.ring = tx_ring; + ice_eswitch_remap_ring(tx_ring, q_vector, repr->netdev); + /* In switchdev mode, from OS stack perspective, there is only + * one queue for given netdev, so it needs to be indexed as 0. 
+ */ + tx_ring->q_index = 0; + + q_vector->num_ring_rx = 1; + q_vector->rx.ring = rx_ring; + ice_eswitch_remap_ring(rx_ring, q_vector, repr->netdev); + } +} + +/** + * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode + * @pf: pointer to PF struct + */ +static int ice_eswitch_setup_reprs(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + int max_vsi_num = 0; + int i; + + ice_for_each_vf(pf, i) { + struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; + struct ice_vf *vf = &pf->vf[i]; + + ice_remove_vsi_fltr(&pf->hw, vsi->idx); + vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!vf->repr->dst) { + ice_fltr_add_mac_and_broadcast(vsi, + vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + goto err; + } + + if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) { + ice_fltr_add_mac_and_broadcast(vsi, + vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + metadata_dst_free(vf->repr->dst); + goto err; + } + + if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) { + ice_fltr_add_mac_and_broadcast(vsi, + vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + metadata_dst_free(vf->repr->dst); + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + goto err; + } + + if (max_vsi_num < vsi->vsi_num) + max_vsi_num = vsi->vsi_num; + + netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll, + NAPI_POLL_WEIGHT); + + netif_keep_dst(vf->repr->netdev); + } + + kfree(ctrl_vsi->target_netdevs); + + ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1, + sizeof(*ctrl_vsi->target_netdevs), + GFP_KERNEL); + if (!ctrl_vsi->target_netdevs) + goto err; + + ice_for_each_vf(pf, i) { + struct ice_repr *repr = pf->vf[i].repr; + struct ice_vsi *vsi = repr->src_vsi; + struct metadata_dst *dst; + + ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev; + + dst = repr->dst; + dst->u.port_info.port_id = vsi->vsi_num; + dst->u.port_info.lower_dev = repr->netdev; + ice_repr_set_traffic_vsi(repr, ctrl_vsi); + } + + return 0; + +err: + for (i = i - 1; i >= 0; i--) { + struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; + struct ice_vf *vf = &pf->vf[i]; + + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(vf->repr->dst); + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + } + + return -ENODEV; +} + +/** + * ice_eswitch_release_reprs - clear PR VSIs configuration + * @pf: poiner to PF struct + * @ctrl_vsi: pointer to switchdev control VSI + */ +static void +ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) +{ + int i; + + kfree(ctrl_vsi->target_netdevs); + ice_for_each_vf(pf, i) { + struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; + struct ice_vf *vf = &pf->vf[i]; + + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(vf->repr->dst); + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + + netif_napi_del(&vf->repr->q_vector->napi); + } +} + +/** + * ice_eswitch_update_repr - reconfigure VF port representor + * @vsi: VF VSI for which port representor is configured + */ +void ice_eswitch_update_repr(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_repr *repr; + struct ice_vf *vf; + int ret; + + if (!ice_is_switchdev_running(pf)) + return; + + vf = &pf->vf[vsi->vf_id]; + repr = vf->repr; + repr->src_vsi = vsi; + repr->dst->u.port_info.port_id = vsi->vsi_num; + + ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); + if (ret) { + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); + 
dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id); + } +} + +/** + * ice_eswitch_port_start_xmit - callback for packets transmit + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +netdev_tx_t +ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ice_netdev_priv *np; + struct ice_repr *repr; + struct ice_vsi *vsi; + + np = netdev_priv(netdev); + vsi = np->vsi; + + if (ice_is_reset_in_progress(vsi->back->state)) + return NETDEV_TX_BUSY; + + repr = ice_netdev_to_repr(netdev); + skb_dst_drop(skb); + dst_hold((struct dst_entry *)repr->dst); + skb_dst_set(skb, (struct dst_entry *)repr->dst); + skb->queue_mapping = repr->vf->vf_id; + + return ice_start_xmit(skb, netdev); +} + +/** + * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor + * @skb: pointer to send buffer + * @off: pointer to offload struct + */ +void +ice_eswitch_set_target_vsi(struct sk_buff *skb, + struct ice_tx_offload_params *off) +{ + struct metadata_dst *dst = skb_metadata_dst(skb); + u64 cd_cmd, dst_vsi; + + if (!dst) { + cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S; + off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX); + } else { + cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S; + dst_vsi = ((u64)dst->u.port_info.port_id << + ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M; + off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX; + } +} + +/** + * ice_eswitch_release_env - clear switchdev HW filters + * @pf: pointer to PF struct + * + * This function removes HW filters configuration specific for switchdev + * mode and restores default legacy mode settings. + */ +static void ice_eswitch_release_env(struct ice_pf *pf) +{ + struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + + ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); + ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); + ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX); + ice_clear_dflt_vsi(uplink_vsi->vsw); + ice_fltr_add_mac_and_broadcast(uplink_vsi, + uplink_vsi->port_info->mac.perm_addr, + ICE_FWD_TO_VSI); +} + +/** + * ice_eswitch_vsi_setup - configure switchdev control VSI + * @pf: pointer to PF structure + * @pi: pointer to port_info structure + */ +static struct ice_vsi * +ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID); +} + +/** + * ice_eswitch_napi_del - remove NAPI handle for all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_napi_del(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) + netif_napi_del(&pf->vf[i].repr->q_vector->napi); +} + +/** + * ice_eswitch_napi_enable - enable NAPI for all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_napi_enable(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) + napi_enable(&pf->vf[i].repr->q_vector->napi); +} + +/** + * ice_eswitch_napi_disable - disable NAPI for all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_napi_disable(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) + napi_disable(&pf->vf[i].repr->q_vector->napi); +} + +/** + * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI + * @vsi: VSI to setup rxdid on + * @rxdid: flex descriptor id + */ +static void 
ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid) +{ + struct ice_hw *hw = &vsi->back->hw; + int i; + + ice_for_each_rxq(vsi, i) { + struct ice_ring *ring = vsi->rx_rings[i]; + u16 pf_q = vsi->rxq_map[ring->q_index]; + + ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); + } +} + +/** + * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode + * @pf: pointer to PF structure + */ +static int ice_eswitch_enable_switchdev(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi; + + pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); + if (!pf->switchdev.control_vsi) + return -ENODEV; + + ctrl_vsi = pf->switchdev.control_vsi; + pf->switchdev.uplink_vsi = ice_get_main_vsi(pf); + if (!pf->switchdev.uplink_vsi) + goto err_vsi; + + if (ice_eswitch_setup_env(pf)) + goto err_vsi; + + if (ice_repr_add_for_all_vfs(pf)) + goto err_repr_add; + + if (ice_eswitch_setup_reprs(pf)) + goto err_setup_reprs; + + ice_eswitch_remap_rings_to_vectors(pf); + + if (ice_vsi_open(ctrl_vsi)) + goto err_setup_reprs; + + ice_eswitch_napi_enable(pf); + + ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2); + + return 0; + +err_setup_reprs: + ice_repr_rem_from_all_vfs(pf); +err_repr_add: + ice_eswitch_release_env(pf); +err_vsi: + ice_vsi_release(ctrl_vsi); + return -ENODEV; +} + +/** + * ice_eswitch_disable_switchdev - disable switchdev resources + * @pf: pointer to PF structure + */ +static void ice_eswitch_disable_switchdev(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + + ice_eswitch_napi_disable(pf); + ice_eswitch_release_env(pf); + ice_eswitch_release_reprs(pf, ctrl_vsi); + ice_vsi_release(ctrl_vsi); + ice_repr_rem_from_all_vfs(pf); +} + +/** + * ice_eswitch_mode_set - set new eswitch mode + * @devlink: pointer to devlink structure + * @mode: eswitch mode to switch to + * @extack: pointer to extack structure + */ +int +ice_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + + if (pf->eswitch_mode == mode) + return 0; + + if (pf->num_alloc_vfs) { + dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created"); + NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created"); + return -EOPNOTSUPP; + } + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", + pf->hw.pf_id); + NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy"); + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + { + dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", + pf->hw.pf_id); + NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); + break; + } + default: + NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode"); + return -EINVAL; + } + + pf->eswitch_mode = mode; + return 0; +} + +/** + * ice_eswitch_get_target_netdev - return port representor netdev + * @rx_ring: pointer to Rx ring + * @rx_desc: pointer to Rx descriptor + * + * When working in switchdev mode context (when control VSI is used), this + * function returns netdev of appropriate port representor. For non-switchdev + * context, regular netdev associated with Rx ring is returned. 
+ */ +struct net_device * +ice_eswitch_get_target_netdev(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + struct ice_32b_rx_flex_desc_nic_2 *desc; + struct ice_vsi *vsi = rx_ring->vsi; + struct ice_vsi *control_vsi; + u16 target_vsi_id; + + control_vsi = vsi->back->switchdev.control_vsi; + if (vsi != control_vsi) + return rx_ring->netdev; + + desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc; + target_vsi_id = le16_to_cpu(desc->src_vsi); + + return vsi->target_netdevs[target_vsi_id]; +} + +/** + * ice_eswitch_mode_get - get current eswitch mode + * @devlink: pointer to devlink structure + * @mode: output parameter for current eswitch mode + */ +int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct ice_pf *pf = devlink_priv(devlink); + + *mode = pf->eswitch_mode; + return 0; +} + +/** + * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev + * @pf: pointer to PF structure + * + * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV, + * false otherwise. + */ +bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) +{ + return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV; +} + +/** + * ice_eswitch_release - cleanup eswitch + * @pf: pointer to PF structure + */ +void ice_eswitch_release(struct ice_pf *pf) +{ + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) + return; + + ice_eswitch_disable_switchdev(pf); + pf->switchdev.is_running = false; +} + +/** + * ice_eswitch_configure - configure eswitch + * @pf: pointer to PF structure + */ +int ice_eswitch_configure(struct ice_pf *pf) +{ + int status; + + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running) + return 0; + + status = ice_eswitch_enable_switchdev(pf); + if (status) + return status; + + pf->switchdev.is_running = true; + return 0; +} + +/** + * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) +{ + struct ice_repr *repr; + int i; + + if (test_bit(ICE_DOWN, pf->state)) + return; + + ice_for_each_vf(pf, i) { + repr = pf->vf[i].repr; + if (repr) + ice_repr_start_tx_queues(repr); + } +} + +/** + * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors + * @pf: pointer to PF structure + */ +void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) +{ + struct ice_repr *repr; + int i; + + if (test_bit(ICE_DOWN, pf->state)) + return; + + ice_for_each_vf(pf, i) { + repr = pf->vf[i].repr; + if (repr) + ice_repr_stop_tx_queues(repr); + } +} + +/** + * ice_eswitch_rebuild - rebuild eswitch + * @pf: pointer to PF structure + */ +int ice_eswitch_rebuild(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + int status; + + ice_eswitch_napi_disable(pf); + ice_eswitch_napi_del(pf); + + status = ice_eswitch_setup_env(pf); + if (status) + return status; + + status = ice_eswitch_setup_reprs(pf); + if (status) + return status; + + ice_eswitch_remap_rings_to_vectors(pf); + + ice_replay_tc_fltrs(pf); + + status = ice_vsi_open(ctrl_vsi); + if (status) + return status; + + ice_eswitch_napi_enable(pf); + ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2); + ice_eswitch_start_all_tx_queues(pf); + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h new file mode 100644 index 000000000000..23df0d400847 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ +/* Copyright (C) 2019-2021, Intel Corporation. */ + +#ifndef _ICE_ESWITCH_H_ +#define _ICE_ESWITCH_H_ + +#include <net/devlink.h> + +#ifdef CONFIG_ICE_SWITCHDEV +void ice_eswitch_release(struct ice_pf *pf); +int ice_eswitch_configure(struct ice_pf *pf); +int ice_eswitch_rebuild(struct ice_pf *pf); + +int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); +int +ice_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); +bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); + +void ice_eswitch_update_repr(struct ice_vsi *vsi); + +void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); + +struct net_device * +ice_eswitch_get_target_netdev(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc); + +void ice_eswitch_set_target_vsi(struct sk_buff *skb, + struct ice_tx_offload_params *off); +netdev_tx_t +ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); +#else /* CONFIG_ICE_SWITCHDEV */ +static inline void ice_eswitch_release(struct ice_pf *pf) { } + +static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { } + +static inline void +ice_eswitch_set_target_vsi(struct sk_buff *skb, + struct ice_tx_offload_params *off) { } + +static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { } + +static inline int ice_eswitch_configure(struct ice_pf *pf) +{ + return -EOPNOTSUPP; +} + +static inline int ice_eswitch_rebuild(struct ice_pf *pf) +{ + return -EOPNOTSUPP; +} + +static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + return DEVLINK_ESWITCH_MODE_LEGACY; +} + +static inline int +ice_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + +static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) +{ + return false; +} + +static inline struct net_device * +ice_eswitch_get_target_netdev(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + return rx_ring->netdev; +} + +static inline netdev_tx_t +ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + return NETDEV_TX_BUSY; +} +#endif /* CONFIG_ICE_SWITCHDEV */ +#endif /* _ICE_ESWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index c451cf401e63..201979cc47fb 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -170,10 +170,9 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) static void -ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo, + struct ice_vsi *vsi) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct ice_orom_info *orom; @@ -196,6 +195,26 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE; } +static void +ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + __ice_get_drvinfo(netdev, drvinfo, np->vsi); +} + +static void +ice_repr_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + if (ice_check_vf_ready_for_cfg(repr->vf)) + return; + + 
__ice_get_drvinfo(netdev, drvinfo, repr->src_vsi); +} + static int ice_get_regs_len(struct net_device __always_unused *netdev) { return sizeof(ice_regs_dump_list); @@ -869,7 +888,7 @@ skip_ol_tests: static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; + struct ice_vsi *vsi = ice_get_netdev_priv_vsi(np); unsigned int i; u8 *p = data; @@ -879,6 +898,9 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ethtool_sprintf(&p, ice_gstrings_vsi_stats[i].stat_string); + if (ice_is_port_repr_netdev(netdev)) + return; + ice_for_each_alloc_txq(vsi, i) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -1215,6 +1237,13 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) enum ice_status status; bool dcbx_agent_status; + if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) { + clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); + dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n"); + ret = -EOPNOTSUPP; + goto ethtool_exit; + } + /* Remove rule to direct LLDP packets to default VSI. * The FW LLDP engine will now be consuming them. */ @@ -1301,6 +1330,9 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) * order of strings will suffer from race conditions and are * not safe. */ + if (ice_is_port_repr_netdev(netdev)) + return ICE_VSI_STATS_LEN; + return ICE_ALL_STATS_LEN(netdev); case ETH_SS_TEST: return ICE_TEST_LEN; @@ -1316,7 +1348,7 @@ ice_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats __always_unused *stats, u64 *data) { struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; + struct ice_vsi *vsi = ice_get_netdev_priv_vsi(np); struct ice_pf *pf = vsi->back; struct ice_ring *ring; unsigned int j; @@ -1332,6 +1364,9 @@ ice_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } + if (ice_is_port_repr_netdev(netdev)) + return; + /* populate per queue stats */ rcu_read_lock(); @@ -4055,6 +4090,23 @@ void ice_set_ethtool_safe_mode_ops(struct net_device *netdev) netdev->ethtool_ops = &ice_ethtool_safe_mode_ops; } +static const struct ethtool_ops ice_ethtool_repr_ops = { + .get_drvinfo = ice_repr_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = ice_get_strings, + .get_ethtool_stats = ice_get_ethtool_stats, + .get_sset_count = ice_get_sset_count, +}; + +/** + * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops + * @netdev: network interface device structure + */ +void ice_set_ethtool_repr_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ice_ethtool_repr_ops; +} + /** * ice_set_ethtool_ops - setup netdev ethtool ops * @netdev: network interface device structure diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index 59ef68f072c0..cbd8424631e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -952,7 +952,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); if (frag) - loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF; + loc[20] = ICE_FDIR_IPV4_PKT_FLAG_MF; break; case ICE_FLTR_PTYPE_NONF_IPV4_UDP: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index d2d40e18ae8a..da4163856f4c 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -48,7 +48,7 @@ * requests that the packet not be fragmented. MF indicates that a packet has * been fragmented. */ -#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20 +#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20 enum ice_fltr_prgm_desc_dest { ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 06ac9badee77..5e8f3256f8ea 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -735,7 +735,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw) * * This function will request ownership of the change lock. */ -static enum ice_status +enum ice_status ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) { return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, @@ -748,7 +748,7 @@ ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) * * This function will release the change lock using the proper Admin Command. */ -static void ice_release_change_lock(struct ice_hw *hw) +void ice_release_change_lock(struct ice_hw *hw) { ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); } @@ -1330,6 +1330,86 @@ fw_ddp_compat_free_alloc: } /** + * ice_sw_fv_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the field vector entry to be returned + * @offset: ptr to variable that receives the offset in the field vector table + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * This function treats the given section as of type ice_sw_fv_section and + * enumerates offset field. "offset" is an index into the field vector table. 
+ */ +static void * +ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_sw_fv_section *fv_section = section; + + if (!section || sect_type != ICE_SID_FLD_VEC_SW) + return NULL; + if (index >= le16_to_cpu(fv_section->count)) + return NULL; + if (offset) + /* "index" passed in to this function is relative to a given + * 4k block. To get to the true index into the field vector + * table need to add the relative index to the base_offset + * field of this section + */ + *offset = le16_to_cpu(fv_section->base_offset) + index; + return fv_section->fv + index; +} + +/** + * ice_get_prof_index_max - get the max profile index for used profile + * @hw: pointer to the HW struct + * + * Calling this function will get the max profile index for used profile + * and store the index number in struct ice_switch_info *switch_info + * in HW for following use. + */ +static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) +{ + u16 prof_index = 0, j, max_prof_index = 0; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + bool flag = false; + struct ice_fv *fv; + u32 offset; + + memset(&state, 0, sizeof(state)); + + if (!hw->seg) + return ICE_ERR_PARAM; + + ice_seg = hw->seg; + + do { + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* in the profile that not be used, the prot_id is set to 0xff + * and the off is set to 0x1ff for all the field vectors. + */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id != ICE_PROT_INVALID || + fv->ew[j].off != ICE_FV_OFFSET_INVAL) + flag = true; + if (flag && prof_index > max_prof_index) + max_prof_index = prof_index; + + prof_index++; + flag = false; + } while (fv); + + hw->switch_info->max_used_prof_index = max_prof_index; + + return 0; +} + +/** * ice_init_pkg - initialize/download package * @hw: pointer to the hardware structure * @buf: pointer to the package buffer @@ -1408,6 +1488,7 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) */ ice_init_pkg_regs(hw); ice_fill_blk_tbls(hw); + ice_get_prof_index_max(hw); } else { ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", status); @@ -1485,6 +1566,167 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) } /** + * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type + * @hw: pointer to hardware structure + * @req_profs: type of profiles requested + * @bm: pointer to memory for returning the bitmap of field vectors + */ +void +ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, + unsigned long *bm) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + if (req_profs == ICE_PROF_ALL) { + bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); + return; + } + + memset(&state, 0, sizeof(state)); + bitmap_zero(bm, ICE_MAX_NUM_PROFILES); + ice_seg = hw->seg; + do { + u32 offset; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + ice_seg = NULL; + + if (fv) { + if (req_profs & ICE_PROF_NON_TUN) + set_bit((u16)offset, bm); + } + } while (fv); +} + +/** + * ice_get_sw_fv_list + * @hw: pointer to the HW structure + * @prot_ids: field vector to search for with a given protocol ID + * @ids_cnt: lookup/protocol count + * @bm: bitmap of field vectors to consider + * @fv_list: Head of a list + * + * Finds all the field vector entries from switch block that contain + * a given protocol ID and returns a list of structures of type + * 
"ice_sw_fv_list_entry". Every structure in the list has a field vector + * definition and profile ID information + * NOTE: The caller of the function is responsible for freeing the memory + * allocated for every list entry. + */ +enum ice_status +ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, + unsigned long *bm, struct list_head *fv_list) +{ + struct ice_sw_fv_list_entry *fvl; + struct ice_sw_fv_list_entry *tmp; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + u32 offset; + + memset(&state, 0, sizeof(state)); + + if (!ids_cnt || !hw->seg) + return ICE_ERR_PARAM; + + ice_seg = hw->seg; + do { + u16 i; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* If field vector is not in the bitmap list, then skip this + * profile. + */ + if (!test_bit((u16)offset, bm)) + continue; + + for (i = 0; i < ids_cnt; i++) { + int j; + + /* This code assumes that if a switch field vector line + * has a matching protocol, then this line will contain + * the entries necessary to represent every field in + * that protocol header. + */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id == prot_ids[i]) + break; + if (j >= hw->blk[ICE_BLK_SW].es.fvw) + break; + if (i + 1 == ids_cnt) { + fvl = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*fvl), GFP_KERNEL); + if (!fvl) + goto err; + fvl->fv_ptr = fv; + fvl->profile_id = offset; + list_add(&fvl->list_entry, fv_list); + break; + } + } + } while (fv); + if (list_empty(fv_list)) + return ICE_ERR_CFG; + return 0; + +err: + list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) { + list_del(&fvl->list_entry); + devm_kfree(ice_hw_to_dev(hw), fvl); + } + + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_init_prof_result_bm - Initialize the profile result index bitmap + * @hw: pointer to hardware structure + */ +void ice_init_prof_result_bm(struct ice_hw *hw) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + memset(&state, 0, sizeof(state)); + + if (!hw->seg) + return; + + ice_seg = hw->seg; + do { + u32 off; + u16 i; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &off, ice_sw_fv_handler); + ice_seg = NULL; + if (!fv) + break; + + bitmap_zero(hw->switch_info->prof_res_bm[off], + ICE_MAX_FV_WORDS); + + /* Determine empty field vector indices, these can be + * used for recipe results. Skip index 0, since it is + * always used for Switch ID. 
+ */ + for (i = 1; i < ICE_MAX_FV_WORDS; i++) + if (fv->ew[i].prot_id == ICE_PROT_INVALID && + fv->ew[i].off == ICE_FV_OFFSET_INVAL) + set_bit(i, hw->switch_info->prof_res_bm[off]); + } while (fv); +} + +/** * ice_pkg_buf_free * @hw: pointer to the HW structure * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) @@ -1863,6 +2105,35 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, return 0; } +/** + * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index + * @hw: pointer to the hardware structure + * @blk: hardware block + * @prof: profile ID + * @fv_idx: field vector word index + * @prot: variable to receive the protocol ID + * @off: variable to receive the protocol offset + */ +enum ice_status +ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, + u8 *prot, u16 *off) +{ + struct ice_fv_word *fv_ext; + + if (prof >= hw->blk[blk].es.count) + return ICE_ERR_PARAM; + + if (fv_idx >= hw->blk[blk].es.fvw) + return ICE_ERR_PARAM; + + fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); + + *prot = fv_ext[fv_idx].prot_id; + *off = fv_ext[fv_idx].off; + + return 0; +} + /* PTG Management */ /** diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 8a58e79729b9..344c2637facd 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -18,6 +18,20 @@ #define ICE_PKG_CNT 4 +enum ice_status +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); +void ice_release_change_lock(struct ice_hw *hw); +enum ice_status +ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, + u8 *prot, u16 *off); +void +ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, + unsigned long *bm); +void +ice_init_prof_result_bm(struct ice_hw *hw); +enum ice_status +ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, + unsigned long *bm, struct list_head *fv_list); bool ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port); int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 7d8b517a63c9..120bcebaa080 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -13,6 +13,8 @@ struct ice_fv_word { u8 resvrd; } __packed; +#define ICE_MAX_NUM_PROFILES 256 + #define ICE_MAX_FV_WORDS 48 struct ice_fv { struct ice_fv_word ew[ICE_MAX_FV_WORDS]; @@ -279,6 +281,12 @@ struct ice_sw_fv_section { struct ice_fv fv[]; }; +struct ice_sw_fv_list_entry { + struct list_head list_entry; + u32 profile_id; + struct ice_fv *fv_ptr; +}; + /* The BOOST TCAM stores the match packet header in reverse order, meaning * the fields are reversed; in addition, this means that the normally big endian * fields of the packet are now little endian. 
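[Editor's note: illustrative sketch, not part of the patch above.] The flex-pipe additions shown earlier (ice_sw_fv_handler, ice_get_sw_fv_bitmap, ice_get_sw_fv_list, ice_init_prof_result_bm) all share one enumeration idiom: walk the downloaded package segment with ice_pkg_enum_entry(), pass a per-entry callback such as ice_sw_fv_handler, and let that callback translate the per-section entry index into an absolute field-vector (profile) offset via the section's base_offset. The minimal user-space model below reproduces only that callback pattern; the names toy_section, toy_handler and toy_enum_entry, and the fake section layout, are invented for illustration and are not the driver's API, while ice_pkg_enum_entry, ice_sw_fv_handler and ice_sw_fv_section are the names used in the patch.

#include <stdio.h>

/* Toy "section": a base offset plus a count of entries, loosely mirroring
 * how ice_sw_fv_section carries base_offset and count in the patch above.
 */
struct toy_section {
	unsigned int base_offset;
	unsigned int count;
	int entries[8];
};

/* Per-entry callback: return the entry at 'index' and report its absolute
 * offset (base_offset + index), which is what ice_sw_fv_handler does for
 * the switch field-vector table.
 */
static int *toy_handler(struct toy_section *sect, unsigned int index,
			unsigned int *offset)
{
	if (!sect || index >= sect->count)
		return NULL;
	if (offset)
		*offset = sect->base_offset + index;
	return &sect->entries[index];
}

/* Enumerator: iteration state lives outside the callback and one entry is
 * handed back per call until the section is exhausted, in the spirit of
 * ice_pkg_enum_entry().
 */
static int *toy_enum_entry(struct toy_section *sect, unsigned int *state,
			   unsigned int *offset)
{
	return toy_handler(sect, (*state)++, offset);
}

int main(void)
{
	struct toy_section sect = {
		.base_offset = 16,
		.count = 3,
		.entries = { 7, 11, 13 },
	};
	unsigned int state = 0, off;
	int *e;

	/* Loop until the enumerator runs out of entries, loosely mirroring
	 * the do/while loops in ice_get_sw_fv_list() and friends.
	 */
	while ((e = toy_enum_entry(&sect, &state, &off)))
		printf("profile %u -> value %d\n", off, *e);

	return 0;
}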
@@ -603,4 +611,9 @@ struct ice_chs_chg { }; #define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT + +enum ice_prof_type { + ICE_PROF_NON_TUN = 0x1, + ICE_PROF_ALL = 0xFF, +}; #endif /* _ICE_FLEX_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c index 2418d4fff037..f8c59df4ff9c 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.c +++ b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -395,3 +395,210 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_remove_eth_list); } + +/** + * ice_fltr_update_rule_flags - update lan_en/lb_en flags + * @hw: pointer to hw + * @rule_id: id of rule being updated + * @recipe_id: recipe id of rule + * @act: current action field + * @type: Rx or Tx + * @src: source VSI + * @new_flags: combinations of lb_en and lan_en + */ +static enum ice_status +ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id, + u32 act, u16 type, u16 src, u32 new_flags) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status err; + u32 flags_mask; + + s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE; + act &= ~flags_mask; + act |= (flags_mask & new_flags); + + s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id); + s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id); + s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); + + if (type & ICE_FLTR_RX) { + s_rule->pdata.lkup_tx_rx.src = + cpu_to_le16(hw->port_info->lport); + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + + } else { + s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src); + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); + } + + err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, + ice_aqc_opc_update_sw_rules, NULL); + + kfree(s_rule); + return err; +} + +/** + * ice_fltr_build_action - build action for rule + * @vsi_id: id of VSI which is use to build action + */ +static u32 ice_fltr_build_action(u16 vsi_id) +{ + return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) | + ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; +} + +/** + * ice_fltr_find_adv_entry - find advanced rule + * @rules: list of rules + * @rule_id: id of wanted rule + */ +static struct ice_adv_fltr_mgmt_list_entry * +ice_fltr_find_adv_entry(struct list_head *rules, u16 rule_id) +{ + struct ice_adv_fltr_mgmt_list_entry *entry; + + list_for_each_entry(entry, rules, list_entry) { + if (entry->rule_info.fltr_rule_id == rule_id) + return entry; + } + + return NULL; +} + +/** + * ice_fltr_update_adv_rule_flags - update flags on advanced rule + * @vsi: pointer to VSI + * @recipe_id: id of recipe + * @entry: advanced rule entry + * @new_flags: flags to update + */ +static enum ice_status +ice_fltr_update_adv_rule_flags(struct ice_vsi *vsi, u16 recipe_id, + struct ice_adv_fltr_mgmt_list_entry *entry, + u32 new_flags) +{ + struct ice_adv_rule_info *info = &entry->rule_info; + struct ice_sw_act_ctrl *act = &info->sw_act; + u32 action; + + if (act->fltr_act != ICE_FWD_TO_VSI) + return ICE_ERR_NOT_SUPPORTED; + + action = ice_fltr_build_action(act->fwd_id.hw_vsi_id); + + return ice_fltr_update_rule_flags(&vsi->back->hw, info->fltr_rule_id, + recipe_id, action, info->sw_act.flag, + act->src, new_flags); +} + +/** + * ice_fltr_find_regular_entry - find regular rule + * @rules: list of rules + * @rule_id: id of wanted rule + */ +static 
struct ice_fltr_mgmt_list_entry * +ice_fltr_find_regular_entry(struct list_head *rules, u16 rule_id) +{ + struct ice_fltr_mgmt_list_entry *entry; + + list_for_each_entry(entry, rules, list_entry) { + if (entry->fltr_info.fltr_rule_id == rule_id) + return entry; + } + + return NULL; +} + +/** + * ice_fltr_update_regular_rule - update flags on regular rule + * @vsi: pointer to VSI + * @recipe_id: id of recipe + * @entry: regular rule entry + * @new_flags: flags to update + */ +static enum ice_status +ice_fltr_update_regular_rule(struct ice_vsi *vsi, u16 recipe_id, + struct ice_fltr_mgmt_list_entry *entry, + u32 new_flags) +{ + struct ice_fltr_info *info = &entry->fltr_info; + u32 action; + + if (info->fltr_act != ICE_FWD_TO_VSI) + return ICE_ERR_NOT_SUPPORTED; + + action = ice_fltr_build_action(info->fwd_id.hw_vsi_id); + + return ice_fltr_update_rule_flags(&vsi->back->hw, info->fltr_rule_id, + recipe_id, action, info->flag, + info->src, new_flags); +} + +/** + * ice_fltr_update_flags - update flags on rule + * @vsi: pointer to VSI + * @rule_id: id of rule + * @recipe_id: id of recipe + * @new_flags: flags to update + * + * Function updates flags on regular and advance rule. + * + * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and + * ICE_SINGLE_ACT_LAN_ENABLE. + */ +enum ice_status +ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id, + u32 new_flags) +{ + struct ice_adv_fltr_mgmt_list_entry *adv_entry; + struct ice_fltr_mgmt_list_entry *regular_entry; + struct ice_hw *hw = &vsi->back->hw; + struct ice_sw_recipe *recp_list; + struct list_head *fltr_rules; + + recp_list = &hw->switch_info->recp_list[recipe_id]; + if (!recp_list) + return ICE_ERR_DOES_NOT_EXIST; + + fltr_rules = &recp_list->filt_rules; + regular_entry = ice_fltr_find_regular_entry(fltr_rules, rule_id); + if (regular_entry) + return ice_fltr_update_regular_rule(vsi, recipe_id, + regular_entry, new_flags); + + adv_entry = ice_fltr_find_adv_entry(fltr_rules, rule_id); + if (adv_entry) + return ice_fltr_update_adv_rule_flags(vsi, recipe_id, + adv_entry, new_flags); + + return ICE_ERR_DOES_NOT_EXIST; +} + +/** + * ice_fltr_update_flags_dflt_rule - update flags on default rule + * @vsi: pointer to VSI + * @rule_id: id of rule + * @direction: Tx or Rx + * @new_flags: flags to update + * + * Function updates flags on default rule with ICE_SW_LKUP_DFLT. + * + * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and + * ICE_SINGLE_ACT_LAN_ENABLE. 
+ */ +enum ice_status +ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, + u32 new_flags) +{ + u32 action = ice_fltr_build_action(vsi->vsi_num); + struct ice_hw *hw = &vsi->back->hw; + + return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action, + direction, vsi->vsi_num, new_flags); +} diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h index 361cb4da9b43..949e38ce016c 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.h +++ b/drivers/net/ethernet/intel/ice/ice_fltr.h @@ -36,4 +36,11 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); void ice_fltr_remove_all(struct ice_vsi *vsi); + +enum ice_status +ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id, + u32 new_flags); +enum ice_status +ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, + u32 new_flags); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 80736e0ec0dc..d981dc6f2323 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -301,6 +301,46 @@ struct ice_32b_rx_flex_desc_nic { } flex_ts; }; +/* Rx Flex Descriptor NIC Profile + * RxDID Profile ID 6 + * Flex-field 0: RSS hash lower 16-bits + * Flex-field 1: RSS hash upper 16-bits + * Flex-field 2: Flow ID lower 16-bits + * Flex-field 3: Source VSI + * Flex-field 4: reserved, VLAN ID taken from L2Tag + */ +struct ice_32b_rx_flex_desc_nic_2 { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 rss_hash; + + /* Qword 2 */ + __le16 status_error1; + u8 flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le16 flow_id; + __le16 src_vsi; + union { + struct { + __le16 rsvd; + __le16 flow_id_ipv6; + } flex; + __le32 ts_high; + } flex_ts; +}; + /* Receive Flex Descriptor profile IDs: There are a total * of 64 profiles where profile IDs 0/1 are for legacy; and * profiles 2-63 are flex profiles that can be programmed @@ -529,6 +569,9 @@ struct ice_tx_ctx_desc { #define ICE_TXD_CTX_QW1_MSS_S 50 +#define ICE_TXD_CTX_QW1_VSI_S 50 +#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S) + enum ice_tx_ctx_desc_cmd_bits { ICE_TX_CTX_DESC_TSO = 0x01, ICE_TX_CTX_DESC_TSYN = 0x02, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index dde9802c6c72..c8a50898bbc1 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -24,6 +24,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) return "ICE_VSI_CTRL"; case ICE_VSI_LB: return "ICE_VSI_LB"; + case ICE_VSI_SWITCHDEV_CTRL: + return "ICE_VSI_SWITCHDEV_CTRL"; default: return "unknown"; } @@ -132,6 +134,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) { switch (vsi->type) { case ICE_VSI_PF: + case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_CTRL: case ICE_VSI_LB: /* a user could change the values of num_[tr]x_desc using @@ -200,6 +203,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) max_t(int, vsi->alloc_rxq, vsi->alloc_txq)); break; + case ICE_VSI_SWITCHDEV_CTRL: + /* The number of queues for ctrl VSI is equal to number of VFs. + * Each ring is associated to the corresponding VF_PR netdev. 
+ */ + vsi->alloc_txq = pf->num_alloc_vfs; + vsi->alloc_rxq = pf->num_alloc_vfs; + vsi->num_q_vectors = 1; + break; case ICE_VSI_VF: vf = &pf->vf[vsi->vf_id]; if (vf->num_req_qs) @@ -408,6 +419,21 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) return IRQ_HANDLED; } +static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data) +{ + struct ice_q_vector *q_vector = (struct ice_q_vector *)data; + struct ice_pf *pf = q_vector->vsi->back; + int i; + + if (!q_vector->tx.ring && !q_vector->rx.ring) + return IRQ_HANDLED; + + ice_for_each_vf(pf, i) + napi_schedule(&pf->vf[i].repr->q_vector->napi); + + return IRQ_HANDLED; +} + /** * ice_vsi_alloc - Allocates the next available struct VSI in the PF * @pf: board private structure @@ -448,6 +474,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); switch (vsi->type) { + case ICE_VSI_SWITCHDEV_CTRL: + if (ice_vsi_alloc_arrays(vsi)) + goto err_rings; + + /* Setup eswitch MSIX irq handler for VSI */ + vsi->irq_handler = ice_eswitch_msix_clean_rings; + break; case ICE_VSI_PF: if (ice_vsi_alloc_arrays(vsi)) goto err_rings; @@ -707,6 +740,12 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) BIT(cap->rss_table_entry_width)); vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; break; + case ICE_VSI_SWITCHDEV_CTRL: + vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + vsi->rss_size = min_t(u16, num_online_cpus(), + BIT(cap->rss_table_entry_width)); + vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; + break; case ICE_VSI_VF: /* VF VSI will get a small RSS table. * For VSI_LUT, LUT size should be set to 64 bytes. @@ -980,6 +1019,9 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; + case ICE_VSI_SWITCHDEV_CTRL: + ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; + break; case ICE_VSI_VF: ctxt->flags = ICE_AQ_VSI_TYPE_VF; /* VF number here is the absolute VF number (0-255) */ @@ -2297,6 +2339,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) case ICE_VSI_CTRL: case ICE_VSI_LB: case ICE_VSI_PF: + case ICE_VSI_SWITCHDEV_CTRL: max_agg_nodes = ICE_MAX_PF_AGG_NODES; agg_node_id_start = ICE_PF_AGG_NODE_ID_START; agg_node_iter = &pf->pf_agg_node[0]; @@ -2448,6 +2491,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, switch (vsi->type) { case ICE_VSI_CTRL: + case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -2757,7 +2801,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) } else { ice_vsi_close(vsi); } - } else if (vsi->type == ICE_VSI_CTRL) { + } else if (vsi->type == ICE_VSI_CTRL || + vsi->type == ICE_VSI_SWITCHDEV_CTRL) { ice_vsi_close(vsi); } } @@ -2859,7 +2904,8 @@ int ice_vsi_release(struct ice_vsi *vsi) clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); } - ice_devlink_destroy_port(vsi); + if (vsi->type == ICE_VSI_PF) + ice_devlink_destroy_pf_port(pf); if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_rss_clean(vsi); @@ -3135,6 +3181,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) switch (vtype) { case ICE_VSI_CTRL: + case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -3573,3 +3620,126 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) return 0; } + +/** + * ice_is_feature_supported + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to be checked + * + * returns true if feature is supported, false otherwise + */ +bool 
ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return false; + + return test_bit(f, pf->features); +} + +/** + * ice_set_feature_support + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to set + */ +static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return; + + set_bit(f, pf->features); +} + +/** + * ice_clear_feature_support + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to clear + */ +void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return; + + clear_bit(f, pf->features); +} + +/** + * ice_init_feature_support + * @pf: pointer to the struct ice_pf instance + * + * called during init to setup supported feature + */ +void ice_init_feature_support(struct ice_pf *pf) +{ + switch (pf->hw.device_id) { + case ICE_DEV_ID_E810C_BACKPLANE: + case ICE_DEV_ID_E810C_QSFP: + case ICE_DEV_ID_E810C_SFP: + ice_set_feature_support(pf, ICE_F_DSCP); + if (ice_is_e810t(&pf->hw)) + ice_set_feature_support(pf, ICE_F_SMA_CTRL); + break; + default: + break; + } +} + +/** + * ice_vsi_update_security - update security block in VSI + * @vsi: pointer to VSI structure + * @fill: function pointer to fill ctx + */ +int +ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) +{ + struct ice_vsi_ctx ctx = { 0 }; + + ctx.info = vsi->info; + ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + fill(&ctx); + + if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) + return -ENODEV; + + vsi->info = ctx.info; + return 0; +} + +/** + * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF | + (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); +} + +/** + * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF & + ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); +} + +/** + * ice_vsi_ctx_set_allow_override - allow destination override on VSI + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; +} + +/** + * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index d5a28bf0fc2c..193f96305407 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -116,4 +116,19 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); int ice_clear_dflt_vsi(struct ice_sw *sw); + +int +ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)); + +void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx); + +void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx); + +void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); + +void 
ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); + +bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f); +void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f); +void ice_init_feature_support(struct ice_pf *pf); #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 0d6c143f6653..ceb0912e5850 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -19,6 +19,8 @@ */ #define CREATE_TRACE_POINTS #include "ice_trace.h" +#include "ice_eswitch.h" +#include "ice_tc_lib.h" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" static const char ice_driver_string[] = DRV_SUMMARY; @@ -46,7 +48,6 @@ static DEFINE_IDA(ice_aux_ida); static struct workqueue_struct *ice_wq; static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; -static int ice_vsi_open(struct ice_vsi *vsi); static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); @@ -624,7 +625,10 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi) netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n"); break; case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA: - netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags)) + netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n"); + else + netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); break; default: break; @@ -1965,7 +1969,8 @@ static int ice_configure_phy(struct ice_vsi *vsi) ice_print_topo_conflict(vsi); - if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) + if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && + phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) return -EPERM; if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) @@ -3103,6 +3108,9 @@ static void ice_set_netdev_features(struct net_device *netdev) /* enable features */ netdev->features |= netdev->hw_features; + + netdev->hw_features |= NETIF_F_HW_TC; + /* encap and VLAN devices inherit default, csumo and tso features */ netdev->hw_enc_features |= dflt_features | csumo_features | tso_features; @@ -3139,7 +3147,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) if (vsi->type == ICE_VSI_PF) { SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); - ether_addr_copy(netdev->dev_addr, mac_addr); + eth_hw_addr_set(netdev, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); } @@ -3538,6 +3546,13 @@ static int ice_ena_msix_range(struct ice_pf *pf) v_left -= needed; } + /* reserve for switchdev */ + needed = ICE_ESWITCH_MSIX; + if (v_left < needed) + goto no_hw_vecs_left_err; + v_budget += needed; + v_left -= needed; + /* total used for non-traffic vectors */ v_other = v_budget; @@ -4170,11 +4185,11 @@ static int ice_register_netdev(struct ice_pf *pf) set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); netif_carrier_off(vsi->netdev); netif_tx_stop_all_queues(vsi->netdev); - err = ice_devlink_create_port(vsi); + err = ice_devlink_create_pf_port(pf); if (err) goto err_devlink_create; - devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev); + devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); return 0; err_devlink_create: @@ -4258,12 +4273,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); - err = ice_devlink_register(pf); - if (err) { - dev_err(dev, "ice_devlink_register failed: %d\n", err); - goto err_exit_unroll; - } - #ifndef CONFIG_DYNAMIC_DEBUG if (debug < -1) hw->debug_mask = debug; @@ -4276,6 +4285,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_exit_unroll; } + ice_init_feature_support(pf); + ice_request_fw(pf); /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be @@ -4497,6 +4508,7 @@ probe_done: dev_warn(dev, "RDMA is not supported on this device\n"); } + ice_devlink_register(pf); return 0; err_init_aux_unroll: @@ -4520,7 +4532,6 @@ err_init_pf_unroll: ice_devlink_destroy_regions(pf); ice_deinit_hw(hw); err_exit_unroll: - ice_devlink_unregister(pf); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); return err; @@ -4597,9 +4608,7 @@ static void ice_remove(struct pci_dev *pdev) struct ice_pf *pf = pci_get_drvdata(pdev); int i; - if (!pf) - return; - + ice_devlink_unregister(pf); for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { if (!ice_is_reset_in_progress(pf->state)) break; @@ -4636,7 +4645,6 @@ static void ice_remove(struct pci_dev *pdev) ice_deinit_pf(pf); ice_devlink_destroy_regions(pf); ice_deinit_hw(&pf->hw); - ice_devlink_unregister(pf); /* Issue a PFR as part of the prescribed driver unload flow. 
Do not * do it via ice_schedule_reset() since there is no need to rebuild @@ -5147,7 +5155,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) netif_addr_lock_bh(netdev); ether_addr_copy(old_mac, netdev->dev_addr); /* change the netdev's MAC address */ - memcpy(netdev->dev_addr, mac, netdev->addr_len); + eth_hw_addr_set(netdev, mac); netif_addr_unlock_bh(netdev); /* Clean up old MAC filter. Not an error if old filter doesn't exist */ @@ -5175,7 +5183,7 @@ err_update_filters: netdev_err(netdev, "can't set MAC %pM. filter update failed\n", mac); netif_addr_lock_bh(netdev); - ether_addr_copy(netdev->dev_addr, old_mac); + eth_hw_addr_set(netdev, old_mac); netif_addr_unlock_bh(netdev); return err; } @@ -5989,9 +5997,11 @@ int ice_down(struct ice_vsi *vsi) /* Caller of this function is expected to set the * vsi->state ICE_DOWN bit */ - if (vsi->netdev) { + if (vsi->netdev && vsi->type == ICE_VSI_PF) { netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); + } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { + ice_eswitch_stop_all_tx_queues(vsi->back); } ice_vsi_dis_irq(vsi); @@ -6058,7 +6068,8 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) if (!ring) return -EINVAL; - ring->netdev = vsi->netdev; + if (vsi->netdev) + ring->netdev = vsi->netdev; err = ice_setup_tx_ring(ring); if (err) break; @@ -6089,7 +6100,8 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) if (!ring) return -EINVAL; - ring->netdev = vsi->netdev; + if (vsi->netdev) + ring->netdev = vsi->netdev; err = ice_setup_rx_ring(ring); if (err) break; @@ -6162,7 +6174,7 @@ err_setup_tx: * * Returns 0 on success, negative value on error */ -static int ice_vsi_open(struct ice_vsi *vsi) +int ice_vsi_open(struct ice_vsi *vsi) { char int_name[ICE_INT_NAME_STR_LEN]; struct ice_pf *pf = vsi->back; @@ -6187,14 +6199,16 @@ static int ice_vsi_open(struct ice_vsi *vsi) if (err) goto err_setup_rx; - /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); - if (err) - goto err_set_qs; + if (vsi->type == ICE_VSI_PF) { + /* Notify the stack of the actual queue counts. 
*/ + err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); + if (err) + goto err_set_qs; - err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); - if (err) - goto err_set_qs; + err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); + if (err) + goto err_set_qs; + } err = ice_up_complete(vsi); if (err) @@ -6433,6 +6447,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); + if (err) { + dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); + goto err_vsi_rebuild; + } + /* If Flow Director is active */ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); @@ -7054,6 +7074,72 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) } /** + * ice_setup_tc_cls_flower - flower classifier offloads + * @np: net device to configure + * @filter_dev: device on which filter is added + * @cls_flower: offload data + */ +static int +ice_setup_tc_cls_flower(struct ice_netdev_priv *np, + struct net_device *filter_dev, + struct flow_cls_offload *cls_flower) +{ + struct ice_vsi *vsi = np->vsi; + + if (cls_flower->common.chain_index) + return -EOPNOTSUPP; + + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return ice_add_cls_flower(filter_dev, vsi, cls_flower); + case FLOW_CLS_DESTROY: + return ice_del_cls_flower(vsi, cls_flower); + default: + return -EINVAL; + } +} + +/** + * ice_setup_tc_block_cb - callback handler registered for TC block + * @type: TC SETUP type + * @type_data: TC flower offload data that contains user input + * @cb_priv: netdev private data + */ +static int +ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct ice_netdev_priv *np = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return ice_setup_tc_cls_flower(np, np->vsi->netdev, + type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(ice_block_cb_list); + +static int +ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &ice_block_cb_list, + ice_setup_tc_block_cb, + np, np, true); + default: + return -EOPNOTSUPP; + } + return -EOPNOTSUPP; +} + +/** * ice_open - Called when a network interface becomes active * @netdev: network interface device structure * @@ -7239,6 +7325,7 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_open = ice_open, .ndo_stop = ice_stop, .ndo_start_xmit = ice_start_xmit, + .ndo_select_queue = ice_select_queue, .ndo_features_check = ice_features_check, .ndo_set_rx_mode = ice_set_rx_mode, .ndo_set_mac_address = ice_set_mac_address, @@ -7256,6 +7343,7 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_get_vf_stats = ice_get_vf_stats, .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, + .ndo_setup_tc = ice_setup_tc, .ndo_set_features = ice_set_features, .ndo_bridge_getlink = ice_bridge_getlink, .ndo_bridge_setlink = ice_bridge_setlink, diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 199aa5b71540..0b220dfa7457 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -3,6 +3,44 @@ #ifndef _ICE_PROTOCOL_TYPE_H_ #define _ICE_PROTOCOL_TYPE_H_ +#define ICE_IPV6_ADDR_LENGTH 
16 + +/* Each recipe can match up to 5 different fields. Fields to match can be meta- + * data, values extracted from packet headers, or results from other recipes. + * One of the 5 fields is reserved for matching the switch ID. So, up to 4 + * recipes can provide intermediate results to another one through chaining, + * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4. + */ +#define ICE_NUM_WORDS_RECIPE 4 + +/* Max recipes that can be chained */ +#define ICE_MAX_CHAIN_RECIPE 5 + +/* 1 word reserved for switch ID from allowed 5 words. + * So a recipe can have max 4 words. And you can chain 5 such recipes + * together. So maximum words that can be programmed for look up is 5 * 4. + */ +#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE) + +/* Field vector index corresponding to chaining */ +#define ICE_CHAIN_FV_INDEX_START 47 + +enum ice_protocol_type { + ICE_MAC_OFOS = 0, + ICE_MAC_IL, + ICE_ETYPE_OL, + ICE_VLAN_OFOS, + ICE_IPV4_OFOS, + ICE_IPV4_IL, + ICE_IPV6_OFOS, + ICE_IPV6_IL, + ICE_TCP_IL, + ICE_UDP_OF, + ICE_UDP_ILOS, + ICE_SCTP_IL, + ICE_PROTOCOL_LAST +}; + /* Decoders for ice_prot_id: * - F: First * - I: Inner @@ -35,4 +73,135 @@ enum ice_prot_id { ICE_PROT_META_ID = 255, /* when offset == metadata */ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */ }; + +#define ICE_MAC_OFOS_HW 1 +#define ICE_MAC_IL_HW 4 +#define ICE_ETYPE_OL_HW 9 +#define ICE_VLAN_OF_HW 16 +#define ICE_VLAN_OL_HW 17 +#define ICE_IPV4_OFOS_HW 32 +#define ICE_IPV4_IL_HW 33 +#define ICE_IPV6_OFOS_HW 40 +#define ICE_IPV6_IL_HW 41 +#define ICE_TCP_IL_HW 49 +#define ICE_UDP_ILOS_HW 53 + +#define ICE_UDP_OF_HW 52 /* UDP Tunnels */ + +#define ICE_TUN_FLAG_FV_IND 2 + +/* Mapping of software defined protocol ID to hardware defined protocol ID */ +struct ice_protocol_entry { + enum ice_protocol_type type; + u8 protocol_id; +}; + +struct ice_ether_hdr { + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; +}; + +struct ice_ethtype_hdr { + __be16 ethtype_id; +}; + +struct ice_ether_vlan_hdr { + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + __be32 vlan_id; +}; + +struct ice_vlan_hdr { + __be16 type; + __be16 vlan; +}; + +struct ice_ipv4_hdr { + u8 version; + u8 tos; + __be16 total_length; + __be16 id; + __be16 frag_off; + u8 time_to_live; + u8 protocol; + __be16 check; + __be32 src_addr; + __be32 dst_addr; +}; + +struct ice_ipv6_hdr { + __be32 be_ver_tc_flow; + __be16 payload_len; + u8 next_hdr; + u8 hop_limit; + u8 src_addr[ICE_IPV6_ADDR_LENGTH]; + u8 dst_addr[ICE_IPV6_ADDR_LENGTH]; +}; + +struct ice_sctp_hdr { + __be16 src_port; + __be16 dst_port; + __be32 verification_tag; + __be32 check; +}; + +struct ice_l4_hdr { + __be16 src_port; + __be16 dst_port; + __be16 len; + __be16 check; +}; + +union ice_prot_hdr { + struct ice_ether_hdr eth_hdr; + struct ice_ethtype_hdr ethertype; + struct ice_vlan_hdr vlan_hdr; + struct ice_ipv4_hdr ipv4_hdr; + struct ice_ipv6_hdr ipv6_hdr; + struct ice_l4_hdr l4_hdr; + struct ice_sctp_hdr sctp_hdr; +}; + +/* This is mapping table entry that maps every word within a given protocol + * structure to the real byte offset as per the specification of that + * protocol header. + * for e.g. 
dst address is 3 words in ethertype header and corresponding bytes + * are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8 + */ +struct ice_prot_ext_tbl_entry { + enum ice_protocol_type prot_type; + /* Byte offset into header of given protocol type */ + u8 offs[sizeof(union ice_prot_hdr)]; +}; + +/* Extractions to be looked up for a given recipe */ +struct ice_prot_lkup_ext { + u16 prot_type; + u8 n_val_words; + /* create a buffer to hold max words per recipe */ + u16 field_off[ICE_MAX_CHAIN_WORDS]; + u16 field_mask[ICE_MAX_CHAIN_WORDS]; + + struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS]; + + /* Indicate field offsets that have field vector indices assigned */ + DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS); +}; + +struct ice_pref_recipe_group { + u8 n_val_pairs; /* Number of valid pairs */ + struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE]; + u16 mask[ICE_NUM_WORDS_RECIPE]; +}; + +struct ice_recp_grp_entry { + struct list_head l_entry; + +#define ICE_INVAL_CHAIN_IND 0xFF + u16 rid; + u8 chain_idx; + u16 fv_idx[ICE_NUM_WORDS_RECIPE]; + u16 fv_mask[ICE_NUM_WORDS_RECIPE]; + struct ice_pref_recipe_group r_group; +}; #endif /* _ICE_PROTOCOL_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 80380aed8882..f3884dc5524e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -6,6 +6,252 @@ #define E810_OUT_PROP_DELAY_NS 1 +static const struct ptp_pin_desc ice_pin_desc_e810t[] = { + /* name idx func chan */ + { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, + { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } }, + { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } }, + { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } }, + { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } }, +}; + +/** + * ice_get_sma_config_e810t + * @hw: pointer to the hw struct + * @ptp_pins: pointer to the ptp_pin_desc struture + * + * Read the configuration of the SMA control logic and put it into the + * ptp_pin_desc structure + */ +static int +ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) +{ + u8 data, i; + int status; + + /* Read initial pin state */ + status = ice_read_sma_ctrl_e810t(hw, &data); + if (status) + return status; + + /* initialize with defaults */ + for (i = 0; i < NUM_PTP_PINS_E810T; i++) { + snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name), + "%s", ice_pin_desc_e810t[i].name); + ptp_pins[i].index = ice_pin_desc_e810t[i].index; + ptp_pins[i].func = ice_pin_desc_e810t[i].func; + ptp_pins[i].chan = ice_pin_desc_e810t[i].chan; + } + + /* Parse SMA1/UFL1 */ + switch (data & ICE_SMA1_MASK_E810T) { + case ICE_SMA1_MASK_E810T: + default: + ptp_pins[SMA1].func = PTP_PF_NONE; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case ICE_SMA1_DIR_EN_E810T: + ptp_pins[SMA1].func = PTP_PF_PEROUT; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case ICE_SMA1_TX_EN_E810T: + ptp_pins[SMA1].func = PTP_PF_EXTTS; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case 0: + ptp_pins[SMA1].func = PTP_PF_EXTTS; + ptp_pins[UFL1].func = PTP_PF_PEROUT; + break; + } + + /* Parse SMA2/UFL2 */ + switch (data & ICE_SMA2_MASK_E810T) { + case ICE_SMA2_MASK_E810T: + default: + ptp_pins[SMA2].func = PTP_PF_NONE; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): + ptp_pins[SMA2].func = PTP_PF_EXTTS; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): + ptp_pins[SMA2].func = PTP_PF_PEROUT; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; 
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T): + ptp_pins[SMA2].func = PTP_PF_NONE; + ptp_pins[UFL2].func = PTP_PF_EXTTS; + break; + case ICE_SMA2_DIR_EN_E810T: + ptp_pins[SMA2].func = PTP_PF_PEROUT; + ptp_pins[UFL2].func = PTP_PF_EXTTS; + break; + } + + return 0; +} + +/** + * ice_ptp_set_sma_config_e810t + * @hw: pointer to the hw struct + * @ptp_pins: pointer to the ptp_pin_desc struture + * + * Set the configuration of the SMA control logic based on the configuration in + * num_pins parameter + */ +static int +ice_ptp_set_sma_config_e810t(struct ice_hw *hw, + const struct ptp_pin_desc *ptp_pins) +{ + int status; + u8 data; + + /* SMA1 and UFL1 cannot be set to TX at the same time */ + if (ptp_pins[SMA1].func == PTP_PF_PEROUT && + ptp_pins[UFL1].func == PTP_PF_PEROUT) + return -EINVAL; + + /* SMA2 and UFL2 cannot be set to RX at the same time */ + if (ptp_pins[SMA2].func == PTP_PF_EXTTS && + ptp_pins[UFL2].func == PTP_PF_EXTTS) + return -EINVAL; + + /* Read initial pin state value */ + status = ice_read_sma_ctrl_e810t(hw, &data); + if (status) + return status; + + /* Set the right sate based on the desired configuration */ + data &= ~ICE_SMA1_MASK_E810T; + if (ptp_pins[SMA1].func == PTP_PF_NONE && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled"); + data |= ICE_SMA1_MASK_E810T; + } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 RX"); + data |= ICE_SMA1_TX_EN_E810T; + } else if (ptp_pins[SMA1].func == PTP_PF_NONE && + ptp_pins[UFL1].func == PTP_PF_PEROUT) { + /* U.FL 1 TX will always enable SMA 1 RX */ + dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); + } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && + ptp_pins[UFL1].func == PTP_PF_PEROUT) { + dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); + } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 TX"); + data |= ICE_SMA1_DIR_EN_E810T; + } + + data &= ~ICE_SMA2_MASK_E810T; + if (ptp_pins[SMA2].func == PTP_PF_NONE && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled"); + data |= ICE_SMA2_MASK_E810T; + } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 RX"); + data |= (ICE_SMA2_TX_EN_E810T | + ICE_SMA2_UFL2_RX_DIS_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_NONE && + ptp_pins[UFL2].func == PTP_PF_EXTTS) { + dev_info(ice_hw_to_dev(hw), "UFL2 RX"); + data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 TX"); + data |= (ICE_SMA2_DIR_EN_E810T | + ICE_SMA2_UFL2_RX_DIS_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && + ptp_pins[UFL2].func == PTP_PF_EXTTS) { + dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX"); + data |= ICE_SMA2_DIR_EN_E810T; + } + + return ice_write_sma_ctrl_e810t(hw, data); +} + +/** + * ice_ptp_set_sma_e810t + * @info: the driver's PTP info structure + * @pin: pin index in kernel structure + * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT) + * + * Set the configuration of a single SMA pin + */ +static int +ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func) +{ + struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T]; + struct ice_pf *pf = ptp_info_to_pf(info); + struct ice_hw *hw = &pf->hw; 
+ int err; + + if (pin < SMA1 || func > PTP_PF_PEROUT) + return -EOPNOTSUPP; + + err = ice_get_sma_config_e810t(hw, ptp_pins); + if (err) + return err; + + /* Disable the same function on the other pin sharing the channel */ + if (pin == SMA1 && ptp_pins[UFL1].func == func) + ptp_pins[UFL1].func = PTP_PF_NONE; + if (pin == UFL1 && ptp_pins[SMA1].func == func) + ptp_pins[SMA1].func = PTP_PF_NONE; + + if (pin == SMA2 && ptp_pins[UFL2].func == func) + ptp_pins[UFL2].func = PTP_PF_NONE; + if (pin == UFL2 && ptp_pins[SMA2].func == func) + ptp_pins[SMA2].func = PTP_PF_NONE; + + /* Set up new pin function in the temp table */ + ptp_pins[pin].func = func; + + return ice_ptp_set_sma_config_e810t(hw, ptp_pins); +} + +/** + * ice_verify_pin_e810t + * @info: the driver's PTP info structure + * @pin: Pin index + * @func: Assigned function + * @chan: Assigned channel + * + * Verify if pin supports requested pin function. If the Check pins consistency. + * Reconfigure the SMA logic attached to the given pin to enable its + * desired functionality + */ +static int +ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + /* Don't allow channel reassignment */ + if (chan != ice_pin_desc_e810t[pin].chan) + return -EOPNOTSUPP; + + /* Check if functions are properly assigned */ + switch (func) { + case PTP_PF_NONE: + break; + case PTP_PF_EXTTS: + if (pin == UFL1) + return -EOPNOTSUPP; + break; + case PTP_PF_PEROUT: + if (pin == UFL2 || pin == GNSS) + return -EOPNOTSUPP; + break; + case PTP_PF_PHYSYNC: + return -EOPNOTSUPP; + } + + return ice_ptp_set_sma_e810t(info, pin, func); +} + /** * ice_set_tx_tstamp - Enable or disable Tx timestamping * @pf: The PF pointer to search in @@ -735,17 +981,34 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, { struct ice_pf *pf = ptp_info_to_pf(info); struct ice_perout_channel clk_cfg = {0}; + bool sma_pres = false; unsigned int chan; u32 gpio_pin; int err; + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) + sma_pres = true; + switch (rq->type) { case PTP_CLK_REQ_PEROUT: chan = rq->perout.index; - if (chan == PPS_CLK_GEN_CHAN) + if (sma_pres) { + if (chan == ice_pin_desc_e810t[SMA1].chan) + clk_cfg.gpio_pin = GPIO_20; + else if (chan == ice_pin_desc_e810t[SMA2].chan) + clk_cfg.gpio_pin = GPIO_22; + else + return -1; + } else if (ice_is_e810t(&pf->hw)) { + if (chan == 0) + clk_cfg.gpio_pin = GPIO_20; + else + clk_cfg.gpio_pin = GPIO_22; + } else if (chan == PPS_CLK_GEN_CHAN) { clk_cfg.gpio_pin = PPS_PIN_INDEX; - else + } else { clk_cfg.gpio_pin = chan; + } clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + rq->perout.period.nsec); @@ -757,7 +1020,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, break; case PTP_CLK_REQ_EXTTS: chan = rq->extts.index; - gpio_pin = chan; + if (sma_pres) { + if (chan < ice_pin_desc_e810t[SMA2].chan) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; + } else if (ice_is_e810t(&pf->hw)) { + if (chan == 0) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; + } else { + gpio_pin = chan; + } err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, rq->extts.flags); @@ -1038,13 +1313,93 @@ ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, } /** + * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Disable the OS access to the SMA pins. Called to clear out the OS + * indications of pin support when we fail to setup the E810-T SMA control + * register. 
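+ *
+ * Only the pin interface is affected: n_pins, n_ext_ts and n_per_out are
+ * cleared and the enable/verify callbacks are removed.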
+ */ +static void +ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +{ + struct device *dev = ice_pf_to_dev(pf); + + dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); + + info->enable = NULL; + info->verify = NULL; + info->n_pins = 0; + info->n_ext_ts = 0; + info->n_per_out = 0; +} + +/** + * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Finish setting up the SMA pins by allocating pin_config, and setting it up + * according to the current status of the SMA. On failure, disable all of the + * extended SMA pin support. + */ +static void +ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + + /* Allocate memory for kernel pins interface */ + info->pin_config = devm_kcalloc(dev, info->n_pins, + sizeof(*info->pin_config), GFP_KERNEL); + if (!info->pin_config) { + ice_ptp_disable_sma_pins_e810t(pf, info); + return; + } + + /* Read current SMA status */ + err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); + if (err) + ice_ptp_disable_sma_pins_e810t(pf, info); +} + +/** + * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs + * @pf: pointer to the PF instance + * @info: PTP clock capabilities + */ +static void +ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +{ + /* Check if SMA controller is in the netlist */ + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) && + !ice_is_pca9575_present(&pf->hw)) + ice_clear_feature_support(pf, ICE_F_SMA_CTRL); + + if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + info->n_ext_ts = N_EXT_TS_E810_NO_SMA; + info->n_per_out = N_PER_OUT_E810T_NO_SMA; + return; + } + + info->n_per_out = N_PER_OUT_E810T; + info->n_ext_ts = N_EXT_TS_E810; + info->n_pins = NUM_PTP_PINS_E810T; + info->verify = ice_verify_pin_e810t; + + /* Complete setup of the SMA pins */ + ice_ptp_setup_sma_pins_e810t(pf, info); +} + +/** * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs * @info: PTP clock capabilities */ static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info) { - info->n_per_out = E810_N_PER_OUT; - info->n_ext_ts = E810_N_EXT_TS; + info->n_per_out = N_PER_OUT_E810; + info->n_ext_ts = N_EXT_TS_E810; } /** @@ -1062,7 +1417,10 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) { info->enable = ice_ptp_gpio_enable_e810; - ice_ptp_setup_pins_e810(info); + if (ice_is_e810t(&pf->hw)) + ice_ptp_setup_pins_e810t(pf, info); + else + ice_ptp_setup_pins_e810(info); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index e1c787bd5b96..1b9aab7de319 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -9,12 +9,21 @@ #include "ice_ptp_hw.h" -enum ice_ptp_pin { +enum ice_ptp_pin_e810 { GPIO_20 = 0, GPIO_21, GPIO_22, GPIO_23, - NUM_ICE_PTP_PIN + NUM_PTP_PIN_E810 +}; + +enum ice_ptp_pin_e810t { + GNSS = 0, + SMA1, + UFL1, + SMA2, + UFL2, + NUM_PTP_PINS_E810T }; struct ice_perout_channel { @@ -155,8 +164,11 @@ struct ice_ptp { #define PPS_CLK_SRC_CHAN 2 #define PPS_PIN_INDEX 5 #define TIME_SYNC_PIN_INDEX 4 -#define E810_N_EXT_TS 3 -#define E810_N_PER_OUT 4 +#define N_EXT_TS_E810 3 +#define N_PER_OUT_E810 4 +#define N_PER_OUT_E810T 3 +#define N_PER_OUT_E810T_NO_SMA 2 +#define N_EXT_TS_E810_NO_SMA 2 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) struct ice_pf; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c 
b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 3eca0e4eab0b..29f947c0cd2e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -649,3 +649,154 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { return ice_clear_phy_tstamp_e810(hw, block, idx); } + +/* E810T SMA functions + * + * The following functions operate specifically on E810T hardware and are used + * to access the extended GPIOs available. + */ + +/** + * ice_get_pca9575_handle + * @hw: pointer to the hw struct + * @pca9575_handle: GPIO controller's handle + * + * Find and return the GPIO controller's handle in the netlist. + * When found - the value will be cached in the hw structure and following calls + * will return cached value + */ +static int +ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) +{ + struct ice_aqc_get_link_topo *cmd; + struct ice_aq_desc desc; + int status; + u8 idx; + + /* If handle was read previously return cached value */ + if (hw->io_expander_handle) { + *pca9575_handle = hw->io_expander_handle; + return 0; + } + + /* If handle was not detected read it from the netlist */ + cmd = &desc.params.get_link_topo; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + + /* Set node type to GPIO controller */ + cmd->addr.topo_params.node_type_ctx = + (ICE_AQC_LINK_TOPO_NODE_TYPE_M & + ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); + +#define SW_PCA9575_SFP_TOPO_IDX 2 +#define SW_PCA9575_QSFP_TOPO_IDX 1 + + /* Check if the SW IO expander controlling SMA exists in the netlist. */ + if (hw->device_id == ICE_DEV_ID_E810C_SFP) + idx = SW_PCA9575_SFP_TOPO_IDX; + else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) + idx = SW_PCA9575_QSFP_TOPO_IDX; + else + return -EOPNOTSUPP; + + cmd->addr.topo_params.index = idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (status) + return -EOPNOTSUPP; + + /* Verify if we found the right IO expander type */ + if (desc.params.get_link_topo.node_part_num != + ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) + return -EOPNOTSUPP; + + /* If present save the handle and return it */ + hw->io_expander_handle = + le16_to_cpu(desc.params.get_link_topo.addr.handle); + *pca9575_handle = hw->io_expander_handle; + + return 0; +} + +/** + * ice_read_sma_ctrl_e810t + * @hw: pointer to the hw struct + * @data: pointer to data to be read from the GPIO controller + * + * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the + * PCA9575 expander, so only bits 3-7 in data are valid. + */ +int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) +{ + int status; + u16 handle; + u8 i; + + status = ice_get_pca9575_handle(hw, &handle); + if (status) + return status; + + *data = 0; + + for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { + bool pin; + + status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, + &pin, NULL); + if (status) + break; + *data |= (u8)(!pin) << i; + } + + return status; +} + +/** + * ice_write_sma_ctrl_e810t + * @hw: pointer to the hw struct + * @data: data to be written to the GPIO controller + * + * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1 + * of the PCA9575 expander, so only bits 3-7 in data are valid. 
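+ *
+ * For example, bit 4 of @data (ICE_SMA1_DIR_EN_E810T) is written to expander
+ * GPIO index 4 + ICE_PCA9575_P1_OFFSET = 12, and the value is inverted
+ * before being written (pin = !(data & BIT(i))).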
+ */ +int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) +{ + int status; + u16 handle; + u8 i; + + status = ice_get_pca9575_handle(hw, &handle); + if (status) + return status; + + for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { + bool pin; + + pin = !(data & (1 << i)); + status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, + pin, NULL); + if (status) + break; + } + + return status; +} + +/** + * ice_is_pca9575_present + * @hw: pointer to the hw struct + * + * Check if the SW IO expander is present in the netlist + */ +bool ice_is_pca9575_present(struct ice_hw *hw) +{ + u16 handle = 0; + int status; + + if (!ice_is_e810t(hw)) + return false; + + status = ice_get_pca9575_handle(hw, &handle); + + return !status && handle; +} diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 55a414e87018..b2984b5c22c1 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -30,6 +30,9 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx); /* E810 family functions */ int ice_ptp_init_phy_e810(struct ice_hw *hw); +int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); +int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); +bool ice_is_pca9575_present(struct ice_hw *hw); #define PFTSYN_SEM_BYTES 4 @@ -76,4 +79,23 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw); #define LOW_TX_MEMORY_BANK_START 0x03090000 #define HIGH_TX_MEMORY_BANK_START 0x03090004 +/* E810T SMA controller pin control */ +#define ICE_SMA1_DIR_EN_E810T BIT(4) +#define ICE_SMA1_TX_EN_E810T BIT(5) +#define ICE_SMA2_UFL2_RX_DIS_E810T BIT(3) +#define ICE_SMA2_DIR_EN_E810T BIT(6) +#define ICE_SMA2_TX_EN_E810T BIT(7) + +#define ICE_SMA1_MASK_E810T (ICE_SMA1_DIR_EN_E810T | \ + ICE_SMA1_TX_EN_E810T) +#define ICE_SMA2_MASK_E810T (ICE_SMA2_UFL2_RX_DIS_E810T | \ + ICE_SMA2_DIR_EN_E810T | \ + ICE_SMA2_TX_EN_E810T) +#define ICE_ALL_SMA_MASK_E810T (ICE_SMA1_MASK_E810T | \ + ICE_SMA2_MASK_E810T) + +#define ICE_SMA_MIN_BIT_E810T 3 +#define ICE_SMA_MAX_BIT_E810T 7 +#define ICE_PCA9575_P1_OFFSET 8 + #endif /* _ICE_PTP_HW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c new file mode 100644 index 000000000000..c49eeea7cb67 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -0,0 +1,386 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_eswitch.h" +#include "ice_devlink.h" +#include "ice_virtchnl_pf.h" +#include "ice_tc_lib.h" + +/** + * ice_repr_get_sw_port_id - get port ID associated with representor + * @repr: pointer to port representor + */ +static int ice_repr_get_sw_port_id(struct ice_repr *repr) +{ + return repr->vf->pf->hw.port_info->lport; +} + +/** + * ice_repr_get_phys_port_name - get phys port name + * @netdev: pointer to port representor netdev + * @buf: write here port name + * @len: max length of buf + */ +static int +ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_repr *repr = np->repr; + int res; + + /* Devlink port is registered and devlink core is taking care of name formatting. 
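Otherwise the name is built below as "pf%dvfr%d", e.g. "pf0vfr3" for port 0 and VF 3.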
*/ + if (repr->vf->devlink_port.devlink) + return -EOPNOTSUPP; + + res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr), + repr->vf->vf_id); + if (res <= 0) + return -EOPNOTSUPP; + return 0; +} + +/** + * ice_repr_get_stats64 - get VF stats for VFPR use + * @netdev: pointer to port representor netdev + * @stats: pointer to struct where stats can be stored + */ +static void +ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_eth_stats *eth_stats; + struct ice_vsi *vsi; + + if (ice_is_vf_disabled(np->repr->vf)) + return; + vsi = np->repr->src_vsi; + + ice_update_vsi_stats(vsi); + eth_stats = &vsi->eth_stats; + + stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast + + eth_stats->tx_multicast; + stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast + + eth_stats->rx_multicast; + stats->tx_bytes = eth_stats->tx_bytes; + stats->rx_bytes = eth_stats->rx_bytes; + stats->multicast = eth_stats->rx_multicast; + stats->tx_errors = eth_stats->tx_errors; + stats->tx_dropped = eth_stats->tx_discards; + stats->rx_dropped = eth_stats->rx_discards; +} + +/** + * ice_netdev_to_repr - Get port representor for given netdevice + * @netdev: pointer to port representor netdev + */ +struct ice_repr *ice_netdev_to_repr(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + return np->repr; +} + +/** + * ice_repr_open - Enable port representor's network interface + * @netdev: network interface device structure + * + * The open entry point is called when a port representor's network + * interface is made active by the system (IFF_UP). Corresponding + * VF is notified about link status change. + * + * Returns 0 on success + */ +static int ice_repr_open(struct net_device *netdev) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + struct ice_vf *vf; + + vf = repr->vf; + vf->link_forced = true; + vf->link_up = true; + ice_vc_notify_vf_link_state(vf); + + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + + return 0; +} + +/** + * ice_repr_stop - Disable port representor's network interface + * @netdev: network interface device structure + * + * The stop entry point is called when a port representor's network + * interface is de-activated by the system. Corresponding + * VF is notified about link status change. 
+ * + * Returns 0 on success + */ +static int ice_repr_stop(struct net_device *netdev) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + struct ice_vf *vf; + + vf = repr->vf; + vf->link_forced = true; + vf->link_up = false; + ice_vc_notify_vf_link_state(vf); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return 0; +} + +static struct devlink_port * +ice_repr_get_devlink_port(struct net_device *netdev) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + return &repr->vf->devlink_port; +} + +static int +ice_repr_setup_tc_cls_flower(struct ice_repr *repr, + struct flow_cls_offload *flower) +{ + switch (flower->command) { + case FLOW_CLS_REPLACE: + return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower); + case FLOW_CLS_DESTROY: + return ice_del_cls_flower(repr->src_vsi, flower); + default: + return -EINVAL; + } +} + +static int +ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data; + struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return ice_repr_setup_tc_cls_flower(np->repr, flower); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(ice_repr_block_cb_list); + +static int +ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple((struct flow_block_offload *) + type_data, + &ice_repr_block_cb_list, + ice_repr_setup_tc_block_cb, + np, np, true); + default: + return -EOPNOTSUPP; + } +} + +static const struct net_device_ops ice_repr_netdev_ops = { + .ndo_get_phys_port_name = ice_repr_get_phys_port_name, + .ndo_get_stats64 = ice_repr_get_stats64, + .ndo_open = ice_repr_open, + .ndo_stop = ice_repr_stop, + .ndo_start_xmit = ice_eswitch_port_start_xmit, + .ndo_get_devlink_port = ice_repr_get_devlink_port, + .ndo_setup_tc = ice_repr_setup_tc, +}; + +/** + * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev + * @netdev: pointer to netdev + */ +bool ice_is_port_repr_netdev(struct net_device *netdev) +{ + return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops); +} + +/** + * ice_repr_reg_netdev - register port representor netdev + * @netdev: pointer to port representor netdev + */ +static int +ice_repr_reg_netdev(struct net_device *netdev) +{ + eth_hw_addr_random(netdev); + netdev->netdev_ops = &ice_repr_netdev_ops; + ice_set_ethtool_repr_ops(netdev); + + netdev->hw_features |= NETIF_F_HW_TC; + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return register_netdev(netdev); +} + +/** + * ice_repr_add - add representor for VF + * @vf: pointer to VF structure + */ +static int ice_repr_add(struct ice_vf *vf) +{ + struct ice_q_vector *q_vector; + struct ice_netdev_priv *np; + struct ice_repr *repr; + int err; + + repr = kzalloc(sizeof(*repr), GFP_KERNEL); + if (!repr) + return -ENOMEM; + + repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv)); + if (!repr->netdev) { + err = -ENOMEM; + goto err_alloc; + } + + repr->src_vsi = ice_get_vf_vsi(vf); + repr->vf = vf; + vf->repr = repr; + np = netdev_priv(repr->netdev); + np->repr = repr; + + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) { + err = -ENOMEM; + goto err_alloc_q_vector; + } + repr->q_vector = q_vector; + + err = ice_devlink_create_vf_port(vf); + if (err) + goto err_devlink; 
+ + err = ice_repr_reg_netdev(repr->netdev); + if (err) + goto err_netdev; + + devlink_port_type_eth_set(&vf->devlink_port, repr->netdev); + + return 0; + +err_netdev: + ice_devlink_destroy_vf_port(vf); +err_devlink: + kfree(repr->q_vector); + vf->repr->q_vector = NULL; +err_alloc_q_vector: + free_netdev(repr->netdev); + repr->netdev = NULL; +err_alloc: + kfree(repr); + vf->repr = NULL; + return err; +} + +/** + * ice_repr_rem - remove representor from VF + * @vf: pointer to VF structure + */ +static void ice_repr_rem(struct ice_vf *vf) +{ + ice_devlink_destroy_vf_port(vf); + kfree(vf->repr->q_vector); + vf->repr->q_vector = NULL; + unregister_netdev(vf->repr->netdev); + free_netdev(vf->repr->netdev); + vf->repr->netdev = NULL; + kfree(vf->repr); + vf->repr = NULL; +} + +/** + * ice_repr_add_for_all_vfs - add port representor for all VFs + * @pf: pointer to PF structure + */ +int ice_repr_add_for_all_vfs(struct ice_pf *pf) +{ + int err; + int i; + + ice_for_each_vf(pf, i) { + struct ice_vf *vf = &pf->vf[i]; + + err = ice_repr_add(vf); + if (err) + goto err; + + ice_vc_change_ops_to_repr(&vf->vc_ops); + } + + return 0; + +err: + for (i = i - 1; i >= 0; i--) { + struct ice_vf *vf = &pf->vf[i]; + + ice_repr_rem(vf); + ice_vc_set_dflt_vf_ops(&vf->vc_ops); + } + + return err; +} + +/** + * ice_repr_rem_from_all_vfs - remove port representor for all VFs + * @pf: pointer to PF structure + */ +void ice_repr_rem_from_all_vfs(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) { + struct ice_vf *vf = &pf->vf[i]; + + ice_repr_rem(vf); + ice_vc_set_dflt_vf_ops(&vf->vc_ops); + } +} + +/** + * ice_repr_start_tx_queues - start Tx queues of port representor + * @repr: pointer to repr structure + */ +void ice_repr_start_tx_queues(struct ice_repr *repr) +{ + netif_carrier_on(repr->netdev); + netif_tx_start_all_queues(repr->netdev); +} + +/** + * ice_repr_stop_tx_queues - stop Tx queues of port representor + * @repr: pointer to repr structure + */ +void ice_repr_stop_tx_queues(struct ice_repr *repr) +{ + netif_carrier_off(repr->netdev); + netif_tx_stop_all_queues(repr->netdev); +} + +/** + * ice_repr_set_traffic_vsi - set traffic VSI for port representor + * @repr: repr on with VSI will be set + * @vsi: pointer to VSI that will be used by port representor to pass traffic + */ +void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi) +{ + struct ice_netdev_priv *np = netdev_priv(repr->netdev); + + np->vsi = vsi; +} diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h new file mode 100644 index 000000000000..806de22933c6 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2019-2021, Intel Corporation. 
*/ + +#ifndef _ICE_REPR_H_ +#define _ICE_REPR_H_ + +#include <net/dst_metadata.h> +#include "ice.h" + +struct ice_repr { + struct ice_vsi *src_vsi; + struct ice_vf *vf; + struct ice_q_vector *q_vector; + struct net_device *netdev; + struct metadata_dst *dst; +}; + +int ice_repr_add_for_all_vfs(struct ice_pf *pf); +void ice_repr_rem_from_all_vfs(struct ice_pf *pf); + +void ice_repr_start_tx_queues(struct ice_repr *repr); +void ice_repr_stop_tx_queues(struct ice_repr *repr); + +void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); + +struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); +bool ice_is_port_repr_netdev(struct net_device *netdev); +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 3b6c1420aa7b..0d07547b40f1 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -8,6 +8,7 @@ #define ICE_ETH_ETHTYPE_OFFSET 12 #define ICE_ETH_VLAN_TCI_OFFSET 14 #define ICE_MAX_VLAN_ID 0xFFF +#define ICE_IPV6_ETHER_ID 0x86DD /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem * struct to configure any switch filter rules. @@ -29,6 +30,290 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, 0x2, 0, 0, 0, 0, 0, 0x81, 0, 0, 0}; +struct ice_dummy_pkt_offsets { + enum ice_protocol_type type; + u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */ +}; + +/* offset info for MAC + IPv4 + UDP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_ILOS, 34 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* Dummy packet for MAC + IPv4 + UDP */ +static const u8 dummy_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_IPV4_OFOS, 18 }, + { ICE_UDP_ILOS, 38 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (801.1Q), IPv4:UDP dummy packet */ +static const u8 dummy_vlan_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ + + 0x08, 0x00, /* ICE_ETYPE_OL 16 */ + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* offset info for MAC + IPv4 + TCP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_TCP_IL, 34 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* Dummy packet for MAC + IPv4 + TCP */ +static const u8 dummy_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 
0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_IPV4_OFOS, 18 }, + { ICE_TCP_IL, 38 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (801.1Q), IPv4:TCP dummy packet */ +static const u8 dummy_vlan_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ + + 0x08, 0x00, /* ICE_ETYPE_OL 16 */ + + 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_TCP_IL, 54 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_tcp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xDD, /* ICE_ETYPE_OL 12 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */ + 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* C-tag (802.1Q): IPv6 + TCP */ +static const struct ice_dummy_pkt_offsets +dummy_vlan_tcp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_IPV6_OFOS, 18 }, + { ICE_TCP_IL, 58 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (802.1Q), IPv6 + TCP dummy packet */ +static const u8 dummy_vlan_tcp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */ + + 0x86, 0xDD, /* ICE_ETYPE_OL 16 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */ + 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* IPv6 + UDP */ +static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_UDP_ILOS, 54 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* IPv6 + UDP dummy packet */ 
+static const u8 dummy_udp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xDD, /* ICE_ETYPE_OL 12 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */ + 0x00, 0x10, 0x11, 0x00, /* Next header UDP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */ + 0x00, 0x10, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* C-tag (802.1Q): IPv6 + UDP */ +static const struct ice_dummy_pkt_offsets +dummy_vlan_udp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_VLAN_OFOS, 12 }, + { ICE_ETYPE_OL, 16 }, + { ICE_IPV6_OFOS, 18 }, + { ICE_UDP_ILOS, 58 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (802.1Q), IPv6 + UDP dummy packet */ +static const u8 dummy_vlan_udp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */ + + 0x86, 0xDD, /* ICE_ETYPE_OL 16 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */ + 0x00, 0x08, 0x11, 0x00, /* Next header UDP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \ (DUMMY_ETH_HDR_LEN * \ @@ -42,6 +327,14 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \ ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0]))) +/* this is a recipe to profile association bitmap */ +static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES], + ICE_MAX_NUM_PROFILES); + +/* this is a profile to recipe association bitmap */ +static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES], + ICE_MAX_NUM_RECIPES); + /** * ice_init_def_sw_recp - initialize the recipe book keeping tables * @hw: pointer to the HW struct @@ -59,10 +352,11 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) if (!recps) return ICE_ERR_NO_MEMORY; - for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { recps[i].root_rid = i; INIT_LIST_HEAD(&recps[i].filt_rules); INIT_LIST_HEAD(&recps[i].filt_replay_rules); + INIT_LIST_HEAD(&recps[i].rg_list); mutex_init(&recps[i].filt_rule_lock); } @@ -518,7 +812,7 @@ ice_aq_alloc_free_vsi_list_exit: * * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware */ -static enum ice_status +enum ice_status ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { @@ -543,6 +837,358 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, return status; } +/** + * ice_aq_add_recipe - add switch recipe + * @hw: pointer to the HW struct + * @s_recipe_list: pointer to switch rule population list + * @num_recipes: number of switch recipes in the list + * @cd: pointer to command details structure or NULL + * + * Add(0x0290) + */ +static enum ice_status 
+ice_aq_add_recipe(struct ice_hw *hw, + struct ice_aqc_recipe_data_elem *s_recipe_list, + u16 num_recipes, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_get_recipe *cmd; + struct ice_aq_desc desc; + u16 buf_size; + + cmd = &desc.params.add_get_recipe; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe); + + cmd->num_sub_recipes = cpu_to_le16(num_recipes); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + buf_size = num_recipes * sizeof(*s_recipe_list); + + return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd); +} + +/** + * ice_aq_get_recipe - get switch recipe + * @hw: pointer to the HW struct + * @s_recipe_list: pointer to switch rule population list + * @num_recipes: pointer to the number of recipes (input and output) + * @recipe_root: root recipe number of recipe(s) to retrieve + * @cd: pointer to command details structure or NULL + * + * Get(0x0292) + * + * On input, *num_recipes should equal the number of entries in s_recipe_list. + * On output, *num_recipes will equal the number of entries returned in + * s_recipe_list. + * + * The caller must supply enough space in s_recipe_list to hold all possible + * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES. + */ +static enum ice_status +ice_aq_get_recipe(struct ice_hw *hw, + struct ice_aqc_recipe_data_elem *s_recipe_list, + u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_get_recipe *cmd; + struct ice_aq_desc desc; + enum ice_status status; + u16 buf_size; + + if (*num_recipes != ICE_MAX_NUM_RECIPES) + return ICE_ERR_PARAM; + + cmd = &desc.params.add_get_recipe; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe); + + cmd->return_index = cpu_to_le16(recipe_root); + cmd->num_sub_recipes = 0; + + buf_size = *num_recipes * sizeof(*s_recipe_list); + + status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd); + *num_recipes = le16_to_cpu(cmd->num_sub_recipes); + + return status; +} + +/** + * ice_aq_map_recipe_to_profile - Map recipe to packet profile + * @hw: pointer to the HW struct + * @profile_id: package profile ID to associate the recipe with + * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @cd: pointer to command details structure or NULL + * Recipe to profile association (0x0291) + */ +static enum ice_status +ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, + struct ice_sq_cd *cd) +{ + struct ice_aqc_recipe_to_profile *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.recipe_to_profile; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile); + cmd->profile_id = cpu_to_le16(profile_id); + /* Set the recipe ID bit in the bitmask to let the device know which + * profile we are associating the recipe to + */ + memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc)); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_get_recipe_to_profile - Map recipe to packet profile + * @hw: pointer to the HW struct + * @profile_id: package profile ID to associate the recipe with + * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @cd: pointer to command details structure or NULL + * Associate profile ID with given recipe (0x0293) + */ +static enum ice_status +ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, + struct ice_sq_cd *cd) +{ + struct ice_aqc_recipe_to_profile *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.recipe_to_profile; + ice_fill_dflt_direct_cmd_desc(&desc, 
ice_aqc_opc_get_recipe_to_profile); + cmd->profile_id = cpu_to_le16(profile_id); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (!status) + memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc)); + + return status; +} + +/** + * ice_alloc_recipe - add recipe resource + * @hw: pointer to the hardware structure + * @rid: recipe ID returned as response to AQ call + */ +static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + enum ice_status status; + u16 buf_len; + + buf_len = struct_size(sw_buf, elem, 1); + sw_buf = kzalloc(buf_len, GFP_KERNEL); + if (!sw_buf) + return ICE_ERR_NO_MEMORY; + + sw_buf->num_elems = cpu_to_le16(1); + sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << + ICE_AQC_RES_TYPE_S) | + ICE_AQC_RES_TYPE_FLAG_SHARED); + status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (!status) + *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp); + kfree(sw_buf); + + return status; +} + +/** + * ice_get_recp_to_prof_map - updates recipe to profile mapping + * @hw: pointer to hardware structure + * + * This function is used to populate recipe_to_profile matrix where index to + * this array is the recipe ID and the element is the mapping of which profiles + * is this recipe mapped to. + */ +static void ice_get_recp_to_prof_map(struct ice_hw *hw) +{ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + u16 i; + + for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) { + u16 j; + + bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES); + bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES); + if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL)) + continue; + bitmap_copy(profile_to_recipe[i], r_bitmap, + ICE_MAX_NUM_RECIPES); + for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES) + set_bit(i, recipe_to_profile[j]); + } +} + +/** + * ice_collect_result_idx - copy result index values + * @buf: buffer that contains the result index + * @recp: the recipe struct to copy data into + */ +static void +ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, + struct ice_sw_recipe *recp) +{ + if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN) + set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN, + recp->res_idxs); +} + +/** + * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries + * @hw: pointer to hardware structure + * @recps: struct that we need to populate + * @rid: recipe ID that we are populating + * @refresh_required: true if we should get recipe to profile mapping from FW + * + * This function is used to populate all the necessary entries into our + * bookkeeping so that we have a current list of all the recipes that are + * programmed in the firmware. 
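+ *
+ * On the first call @refresh_required is expected to be true so the
+ * recipe-to-profile mapping is read from firmware and cached; it is then
+ * cleared so later calls reuse the cached mapping until a new recipe is added.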
+ */ +static enum ice_status +ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, + bool *refresh_required) +{ + DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS); + struct ice_aqc_recipe_data_elem *tmp; + u16 num_recps = ICE_MAX_NUM_RECIPES; + struct ice_prot_lkup_ext *lkup_exts; + enum ice_status status; + u8 fv_word_idx = 0; + u16 sub_recps; + + bitmap_zero(result_bm, ICE_MAX_FV_WORDS); + + /* we need a buffer big enough to accommodate all the recipes */ + tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + tmp[0].recipe_indx = rid; + status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL); + /* non-zero status meaning recipe doesn't exist */ + if (status) + goto err_unroll; + + /* Get recipe to profile map so that we can get the fv from lkups that + * we read for a recipe from FW. Since we want to minimize the number of + * times we make this FW call, just make one call and cache the copy + * until a new recipe is added. This operation is only required the + * first time to get the changes from FW. Then to search existing + * entries we don't need to update the cache again until another recipe + * gets added. + */ + if (*refresh_required) { + ice_get_recp_to_prof_map(hw); + *refresh_required = false; + } + + /* Start populating all the entries for recps[rid] based on lkups from + * firmware. Note that we are only creating the root recipe in our + * database. + */ + lkup_exts = &recps[rid].lkup_exts; + + for (sub_recps = 0; sub_recps < num_recps; sub_recps++) { + struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps]; + struct ice_recp_grp_entry *rg_entry; + u8 i, prof, idx, prot = 0; + bool is_root; + u16 off = 0; + + rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), + GFP_KERNEL); + if (!rg_entry) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll; + } + + idx = root_bufs.recipe_indx; + is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT; + + /* Mark all result indices in this chain */ + if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) + set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN, + result_bm); + + /* get the first profile that is associated with rid */ + prof = find_first_bit(recipe_to_profile[idx], + ICE_MAX_NUM_PROFILES); + for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { + u8 lkup_indx = root_bufs.content.lkup_indx[i + 1]; + + rg_entry->fv_idx[i] = lkup_indx; + rg_entry->fv_mask[i] = + le16_to_cpu(root_bufs.content.mask[i + 1]); + + /* If the recipe is a chained recipe then all its + * child recipe's result will have a result index. + * To fill fv_words we should not use those result + * index, we only need the protocol ids and offsets. + * We will skip all the fv_idx which stores result + * index in them. We also need to skip any fv_idx which + * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a + * valid offset value. 
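+ * (The result indexes themselves were already collected into result_bm
+ * above.)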
+ */ + if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) || + rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE || + rg_entry->fv_idx[i] == 0) + continue; + + ice_find_prot_off(hw, ICE_BLK_SW, prof, + rg_entry->fv_idx[i], &prot, &off); + lkup_exts->fv_words[fv_word_idx].prot_id = prot; + lkup_exts->fv_words[fv_word_idx].off = off; + lkup_exts->field_mask[fv_word_idx] = + rg_entry->fv_mask[i]; + fv_word_idx++; + } + /* populate rg_list with the data from the child entry of this + * recipe + */ + list_add(&rg_entry->l_entry, &recps[rid].rg_list); + + /* Propagate some data to the recipe database */ + recps[idx].is_root = !!is_root; + recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; + bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); + if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { + recps[idx].chain_idx = root_bufs.content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN; + set_bit(recps[idx].chain_idx, recps[idx].res_idxs); + } else { + recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; + } + + if (!is_root) + continue; + + /* Only do the following for root recipes entries */ + memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap, + sizeof(recps[idx].r_bitmap)); + recps[idx].root_rid = root_bufs.content.rid & + ~ICE_AQ_RECIPE_ID_IS_ROOT; + recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; + } + + /* Complete initialization of the root recipe entry */ + lkup_exts->n_val_words = fv_word_idx; + recps[rid].big_recp = (num_recps > 1); + recps[rid].n_grp_count = (u8)num_recps; + recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp, + recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), + GFP_KERNEL); + if (!recps[rid].root_buf) + goto err_unroll; + + /* Copy result indexes */ + bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); + recps[rid].recp_created = true; + +err_unroll: + kfree(tmp); + return status; +} + /* ice_init_port_info - Initialize port_info with switch configuration data * @pi: pointer to port_info * @vsi_port_num: VSI number or port number @@ -2037,6 +2683,27 @@ ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head) } /** + * ice_rem_adv_rule_info + * @hw: pointer to the hardware structure + * @rule_head: pointer to the switch list structure that we want to delete + */ +static void +ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head) +{ + struct ice_adv_fltr_mgmt_list_entry *tmp_entry; + struct ice_adv_fltr_mgmt_list_entry *lst_itr; + + if (list_empty(rule_head)) + return; + + list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) { + list_del(&lst_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups); + devm_kfree(ice_hw_to_dev(hw), lst_itr); + } +} + +/** * ice_cfg_dflt_vsi - change state of VSI to set/clear default * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to set as default @@ -2773,6 +3440,1452 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, return status; } +/* This is mapping table entry that maps every word within a given protocol + * structure to the real byte offset as per the specification of that + * protocol header. + * for example dst address is 3 words in ethertype header and corresponding + * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8 + * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a + * matching entry describing its field. This needs to be updated if new + * structure is added to that union. 
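+ *
+ * As a concrete reading of the table below, the ICE_MAC_OFOS entry maps
+ * lookup words 0-6 to byte offsets 0, 2, 4, 6, 8, 10 and 12 of the Ethernet
+ * header: destination MAC at 0/2/4, source MAC at 6/8/10, ethertype at 12.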
+ */ +static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { + { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } }, + { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } }, + { ICE_ETYPE_OL, { 0 } }, + { ICE_VLAN_OFOS, { 2, 0 } }, + { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, + { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, + { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, + 26, 28, 30, 32, 34, 36, 38 } }, + { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, + 26, 28, 30, 32, 34, 36, 38 } }, + { ICE_TCP_IL, { 0, 2 } }, + { ICE_UDP_OF, { 0, 2 } }, + { ICE_UDP_ILOS, { 0, 2 } }, +}; + +static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { + { ICE_MAC_OFOS, ICE_MAC_OFOS_HW }, + { ICE_MAC_IL, ICE_MAC_IL_HW }, + { ICE_ETYPE_OL, ICE_ETYPE_OL_HW }, + { ICE_VLAN_OFOS, ICE_VLAN_OL_HW }, + { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW }, + { ICE_IPV4_IL, ICE_IPV4_IL_HW }, + { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW }, + { ICE_IPV6_IL, ICE_IPV6_IL_HW }, + { ICE_TCP_IL, ICE_TCP_IL_HW }, + { ICE_UDP_OF, ICE_UDP_OF_HW }, + { ICE_UDP_ILOS, ICE_UDP_ILOS_HW }, +}; + +/** + * ice_find_recp - find a recipe + * @hw: pointer to the hardware structure + * @lkup_exts: extension sequence to match + * + * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. + */ +static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) +{ + bool refresh_required = true; + struct ice_sw_recipe *recp; + u8 i; + + /* Walk through existing recipes to find a match */ + recp = hw->switch_info->recp_list; + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + /* If recipe was not created for this ID, in SW bookkeeping, + * check if FW has an entry for this recipe. If the FW has an + * entry update it in our SW bookkeeping and continue with the + * matching. + */ + if (!recp[i].recp_created) + if (ice_get_recp_frm_fw(hw, + hw->switch_info->recp_list, i, + &refresh_required)) + continue; + + /* Skip inverse action recipes */ + if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl & + ICE_AQ_RECIPE_ACT_INV_ACT) + continue; + + /* if number of words we are looking for match */ + if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) { + struct ice_fv_word *ar = recp[i].lkup_exts.fv_words; + struct ice_fv_word *be = lkup_exts->fv_words; + u16 *cr = recp[i].lkup_exts.field_mask; + u16 *de = lkup_exts->field_mask; + bool found = true; + u8 pe, qr; + + /* ar, cr, and qr are related to the recipe words, while + * be, de, and pe are related to the lookup words + */ + for (pe = 0; pe < lkup_exts->n_val_words; pe++) { + for (qr = 0; qr < recp[i].lkup_exts.n_val_words; + qr++) { + if (ar[qr].off == be[pe].off && + ar[qr].prot_id == be[pe].prot_id && + cr[qr] == de[pe]) + /* Found the "pe"th word in the + * given recipe + */ + break; + } + /* After walking through all the words in the + * "i"th recipe if "p"th word was not found then + * this recipe is not what we are looking for. 
+ * So break out from this loop and try the next + * recipe + */ + if (qr >= recp[i].lkup_exts.n_val_words) { + found = false; + break; + } + } + /* If for "i"th recipe the found was never set to false + * then it means we found our match + */ + if (found) + return i; /* Return the recipe ID */ + } + } + return ICE_MAX_NUM_RECIPES; +} + +/** + * ice_prot_type_to_id - get protocol ID from protocol type + * @type: protocol type + * @id: pointer to variable that will receive the ID + * + * Returns true if found, false otherwise + */ +static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id) +{ + u8 i; + + for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++) + if (ice_prot_id_tbl[i].type == type) { + *id = ice_prot_id_tbl[i].protocol_id; + return true; + } + return false; +} + +/** + * ice_fill_valid_words - count valid words + * @rule: advanced rule with lookup information + * @lkup_exts: byte offset extractions of the words that are valid + * + * calculate valid words in a lookup rule using mask value + */ +static u8 +ice_fill_valid_words(struct ice_adv_lkup_elem *rule, + struct ice_prot_lkup_ext *lkup_exts) +{ + u8 j, word, prot_id, ret_val; + + if (!ice_prot_type_to_id(rule->type, &prot_id)) + return 0; + + word = lkup_exts->n_val_words; + + for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++) + if (((u16 *)&rule->m_u)[j] && + rule->type < ARRAY_SIZE(ice_prot_ext)) { + /* No more space to accommodate */ + if (word >= ICE_MAX_CHAIN_WORDS) + return 0; + lkup_exts->fv_words[word].off = + ice_prot_ext[rule->type].offs[j]; + lkup_exts->fv_words[word].prot_id = + ice_prot_id_tbl[rule->type].protocol_id; + lkup_exts->field_mask[word] = + be16_to_cpu(((__force __be16 *)&rule->m_u)[j]); + word++; + } + + ret_val = word - lkup_exts->n_val_words; + lkup_exts->n_val_words = word; + + return ret_val; +} + +/** + * ice_create_first_fit_recp_def - Create a recipe grouping + * @hw: pointer to the hardware structure + * @lkup_exts: an array of protocol header extractions + * @rg_list: pointer to a list that stores new recipe groups + * @recp_cnt: pointer to a variable that stores returned number of recipe groups + * + * Using first fit algorithm, take all the words that are still not done + * and start grouping them in 4-word groups. Each group makes up one + * recipe. + */ +static enum ice_status +ice_create_first_fit_recp_def(struct ice_hw *hw, + struct ice_prot_lkup_ext *lkup_exts, + struct list_head *rg_list, + u8 *recp_cnt) +{ + struct ice_pref_recipe_group *grp = NULL; + u8 j; + + *recp_cnt = 0; + + /* Walk through every word in the rule to check if it is not done. If so + * then this word needs to be part of a new recipe. 
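+ * A group holds at most ICE_NUM_WORDS_RECIPE (4) words, so e.g. six
+ * outstanding words become one group of four plus one group of two,
+ * i.e. two recipe groups.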
+ */ + for (j = 0; j < lkup_exts->n_val_words; j++) + if (!test_bit(j, lkup_exts->done)) { + if (!grp || + grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) { + struct ice_recp_grp_entry *entry; + + entry = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*entry), + GFP_KERNEL); + if (!entry) + return ICE_ERR_NO_MEMORY; + list_add(&entry->l_entry, rg_list); + grp = &entry->r_group; + (*recp_cnt)++; + } + + grp->pairs[grp->n_val_pairs].prot_id = + lkup_exts->fv_words[j].prot_id; + grp->pairs[grp->n_val_pairs].off = + lkup_exts->fv_words[j].off; + grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j]; + grp->n_val_pairs++; + } + + return 0; +} + +/** + * ice_fill_fv_word_index - fill in the field vector indices for a recipe group + * @hw: pointer to the hardware structure + * @fv_list: field vector with the extraction sequence information + * @rg_list: recipe groupings with protocol-offset pairs + * + * Helper function to fill in the field vector indices for protocol-offset + * pairs. These indexes are then ultimately programmed into a recipe. + */ +static enum ice_status +ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, + struct list_head *rg_list) +{ + struct ice_sw_fv_list_entry *fv; + struct ice_recp_grp_entry *rg; + struct ice_fv_word *fv_ext; + + if (list_empty(fv_list)) + return 0; + + fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry, + list_entry); + fv_ext = fv->fv_ptr->ew; + + list_for_each_entry(rg, rg_list, l_entry) { + u8 i; + + for (i = 0; i < rg->r_group.n_val_pairs; i++) { + struct ice_fv_word *pr; + bool found = false; + u16 mask; + u8 j; + + pr = &rg->r_group.pairs[i]; + mask = rg->r_group.mask[i]; + + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv_ext[j].prot_id == pr->prot_id && + fv_ext[j].off == pr->off) { + found = true; + + /* Store index of field vector */ + rg->fv_idx[i] = j; + rg->fv_mask[i] = mask; + break; + } + + /* Protocol/offset could not be found, caller gave an + * invalid pair + */ + if (!found) + return ICE_ERR_PARAM; + } + } + + return 0; +} + +/** + * ice_find_free_recp_res_idx - find free result indexes for recipe + * @hw: pointer to hardware structure + * @profiles: bitmap of profiles that will be associated with the new recipe + * @free_idx: pointer to variable to receive the free index bitmap + * + * The algorithm used here is: + * 1. When creating a new recipe, create a set P which contains all + * Profiles that will be associated with our new recipe + * + * 2. For each Profile p in set P: + * a. Add all recipes associated with Profile p into set R + * b. Optional : PossibleIndexes &= profile[p].possibleIndexes + * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF] + * i. Or just assume they all have the same possible indexes: + * 44, 45, 46, 47 + * i.e., PossibleIndexes = 0x0000F00000000000 + * + * 3. For each Recipe r in set R: + * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes + * b. FreeIndexes = UsedIndexes ^ PossibleIndexes + * + * FreeIndexes will contain the bits indicating the indexes free for use, + * then the code needs to update the recipe[r].used_result_idx_bits to + * indicate which indexes were selected for use by this recipe. 
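As a worked example of the steps above: if PossibleIndexes ends up as 0x0000F00000000000 (result slots 44-47) and the recipes already tied to those profiles use slots 44 and 45, then UsedIndexes is 0x0000300000000000 and FreeIndexes = UsedIndexes ^ PossibleIndexes = 0x0000C00000000000, i.e. slots 46 and 47 are still free and the function below returns 2.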
+ */ +static u16 +ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, + unsigned long *free_idx) +{ + DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS); + DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES); + DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS); + u16 bit; + + bitmap_zero(possible_idx, ICE_MAX_FV_WORDS); + bitmap_zero(recipes, ICE_MAX_NUM_RECIPES); + bitmap_zero(used_idx, ICE_MAX_FV_WORDS); + bitmap_zero(free_idx, ICE_MAX_FV_WORDS); + + bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS); + + /* For each profile we are going to associate the recipe with, add the + * recipes that are associated with that profile. This will give us + * the set of recipes that our recipe may collide with. Also, determine + * what possible result indexes are usable given this set of profiles. + */ + for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) { + bitmap_or(recipes, recipes, profile_to_recipe[bit], + ICE_MAX_NUM_RECIPES); + bitmap_and(possible_idx, possible_idx, + hw->switch_info->prof_res_bm[bit], + ICE_MAX_FV_WORDS); + } + + /* For each recipe that our new recipe may collide with, determine + * which indexes have been used. + */ + for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES) + bitmap_or(used_idx, used_idx, + hw->switch_info->recp_list[bit].res_idxs, + ICE_MAX_FV_WORDS); + + bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS); + + /* return number of free indexes */ + return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS); +} + +/** + * ice_add_sw_recipe - function to call AQ calls to create switch recipe + * @hw: pointer to hardware structure + * @rm: recipe management list entry + * @match_tun_mask: tunnel mask that needs to be programmed + * @profiles: bitmap of profiles that will be associated. + */ +static enum ice_status +ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, + u16 match_tun_mask, unsigned long *profiles) +{ + DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); + struct ice_aqc_recipe_data_elem *tmp; + struct ice_aqc_recipe_data_elem *buf; + struct ice_recp_grp_entry *entry; + enum ice_status status; + u16 free_res_idx; + u16 recipe_count; + u8 chain_idx; + u8 recps = 0; + + /* When more than one recipe are required, another recipe is needed to + * chain them together. Matching a tunnel metadata ID takes up one of + * the match fields in the chaining recipe reducing the number of + * chained recipes by one. + */ + /* check number of free result indices */ + bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); + free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); + + ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", + free_res_idx, rm->n_grp_count); + + if (rm->n_grp_count > 1) { + if (rm->n_grp_count > free_res_idx) + return ICE_ERR_MAX_LIMIT; + + rm->n_grp_count++; + } + + if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) + return ICE_ERR_MAX_LIMIT; + + tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), + GFP_KERNEL); + if (!buf) { + status = ICE_ERR_NO_MEMORY; + goto err_mem; + } + + bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); + recipe_count = ICE_MAX_NUM_RECIPES; + status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, + NULL); + if (status || recipe_count == 0) + goto err_unroll; + + /* Allocate the recipe resources, and configure them according to the + * match fields from protocol headers and extracted field vectors. 
+ */ + chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); + list_for_each_entry(entry, &rm->rg_list, l_entry) { + u8 i; + + status = ice_alloc_recipe(hw, &entry->rid); + if (status) + goto err_unroll; + + /* Clear the result index of the located recipe, as this will be + * updated, if needed, later in the recipe creation process. + */ + tmp[0].content.result_indx = 0; + + buf[recps] = tmp[0]; + buf[recps].recipe_indx = (u8)entry->rid; + /* if the recipe is a non-root recipe, its RID should be programmed + * as 0 for the rules to be applied correctly. + */ + buf[recps].content.rid = 0; + memset(&buf[recps].content.lkup_indx, 0, + sizeof(buf[recps].content.lkup_indx)); + + /* All recipes use look-up index 0 to match switch ID. */ + buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; + buf[recps].content.mask[0] = + cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); + /* Set up lkup_indx 1..4 as INVALID/ignore and set their masks + * to 0 + */ + for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { + buf[recps].content.lkup_indx[i] = 0x80; + buf[recps].content.mask[i] = 0; + } + + for (i = 0; i < entry->r_group.n_val_pairs; i++) { + buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; + buf[recps].content.mask[i + 1] = + cpu_to_le16(entry->fv_mask[i]); + } + + if (rm->n_grp_count > 1) { + /* Check that there really is a valid result index + * that can be used. + */ + if (chain_idx >= ICE_MAX_FV_WORDS) { + ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); + status = ICE_ERR_MAX_LIMIT; + goto err_unroll; + } + + entry->chain_idx = chain_idx; + buf[recps].content.result_indx = + ICE_AQ_RECIPE_RESULT_EN | + ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & + ICE_AQ_RECIPE_RESULT_DATA_M); + clear_bit(chain_idx, result_idx_bm); + chain_idx = find_first_bit(result_idx_bm, + ICE_MAX_FV_WORDS); + } + + /* fill recipe dependencies */ + bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, + ICE_MAX_NUM_RECIPES); + set_bit(buf[recps].recipe_indx, + (unsigned long *)buf[recps].recipe_bitmap); + buf[recps].content.act_ctrl_fwd_priority = rm->priority; + recps++; + } + + if (rm->n_grp_count == 1) { + rm->root_rid = buf[0].recipe_indx; + set_bit(buf[0].recipe_indx, rm->r_bitmap); + buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; + if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { + memcpy(buf[0].recipe_bitmap, rm->r_bitmap, + sizeof(buf[0].recipe_bitmap)); + } else { + status = ICE_ERR_BAD_PTR; + goto err_unroll; + } + /* Applicable only for ROOT_RECIPE: set the fwd_priority of + * the recipe being created if the user specified one. Any + * advanced switch filter that results in a new extraction + * sequence ends up creating a new recipe of type ROOT, and + * recipes are usually associated with profiles. A switch rule + * referring to the newly created recipe needs either a 'fwd' + * or a 'join' priority, otherwise switch rule evaluation will + * not happen correctly. In other words, if a switch rule is to + * be evaluated on a priority basis, the recipe needs a + * priority; otherwise it will be evaluated last.
+ */ + buf[0].content.act_ctrl_fwd_priority = rm->priority; + } else { + struct ice_recp_grp_entry *last_chain_entry; + u16 rid, i; + + /* Allocate the last recipe that will chain the outcomes of the + * other recipes together + */ + status = ice_alloc_recipe(hw, &rid); + if (status) + goto err_unroll; + + buf[recps].recipe_indx = (u8)rid; + buf[recps].content.rid = (u8)rid; + buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; + /* the new entry created should also be part of rg_list to + * make sure we have complete recipe + */ + last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*last_chain_entry), + GFP_KERNEL); + if (!last_chain_entry) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll; + } + last_chain_entry->rid = rid; + memset(&buf[recps].content.lkup_indx, 0, + sizeof(buf[recps].content.lkup_indx)); + /* All recipes use look-up index 0 to match switch ID. */ + buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; + buf[recps].content.mask[0] = + cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); + for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { + buf[recps].content.lkup_indx[i] = + ICE_AQ_RECIPE_LKUP_IGNORE; + buf[recps].content.mask[i] = 0; + } + + i = 1; + /* update r_bitmap with the recp that is used for chaining */ + set_bit(rid, rm->r_bitmap); + /* this is the recipe that chains all the other recipes so it + * should not have a chaining ID to indicate the same + */ + last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; + list_for_each_entry(entry, &rm->rg_list, l_entry) { + last_chain_entry->fv_idx[i] = entry->chain_idx; + buf[recps].content.lkup_indx[i] = entry->chain_idx; + buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF); + set_bit(entry->rid, rm->r_bitmap); + } + list_add(&last_chain_entry->l_entry, &rm->rg_list); + if (sizeof(buf[recps].recipe_bitmap) >= + sizeof(rm->r_bitmap)) { + memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, + sizeof(buf[recps].recipe_bitmap)); + } else { + status = ICE_ERR_BAD_PTR; + goto err_unroll; + } + buf[recps].content.act_ctrl_fwd_priority = rm->priority; + + /* To differentiate among different UDP tunnels, a meta data ID + * flag is used. + */ + if (match_tun_mask) { + buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND; + buf[recps].content.mask[i] = + cpu_to_le16(match_tun_mask); + } + + recps++; + rm->root_rid = (u8)rid; + } + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + goto err_unroll; + + status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL); + ice_release_change_lock(hw); + if (status) + goto err_unroll; + + /* Every recipe that just got created add it to the recipe + * book keeping list + */ + list_for_each_entry(entry, &rm->rg_list, l_entry) { + struct ice_switch_info *sw = hw->switch_info; + bool is_root, idx_found = false; + struct ice_sw_recipe *recp; + u16 idx, buf_idx = 0; + + /* find buffer index for copying some data */ + for (idx = 0; idx < rm->n_grp_count; idx++) + if (buf[idx].recipe_indx == entry->rid) { + buf_idx = idx; + idx_found = true; + } + + if (!idx_found) { + status = ICE_ERR_OUT_OF_RANGE; + goto err_unroll; + } + + recp = &sw->recp_list[entry->rid]; + is_root = (rm->root_rid == entry->rid); + recp->is_root = is_root; + + recp->root_rid = entry->rid; + recp->big_recp = (is_root && rm->n_grp_count > 1); + + memcpy(&recp->ext_words, entry->r_group.pairs, + entry->r_group.n_val_pairs * sizeof(struct ice_fv_word)); + + memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap, + sizeof(recp->r_bitmap)); + + /* Copy non-result fv index values and masks to recipe. 
This + * call will also update the result recipe bitmask. + */ + ice_collect_result_idx(&buf[buf_idx], recp); + + /* for non-root recipes, also copy to the root, this allows + * easier matching of a complete chained recipe + */ + if (!is_root) + ice_collect_result_idx(&buf[buf_idx], + &sw->recp_list[rm->root_rid]); + + recp->n_ext_words = entry->r_group.n_val_pairs; + recp->chain_idx = entry->chain_idx; + recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority; + recp->n_grp_count = rm->n_grp_count; + recp->recp_created = true; + } + rm->root_buf = buf; + kfree(tmp); + return status; + +err_unroll: +err_mem: + kfree(tmp); + devm_kfree(ice_hw_to_dev(hw), buf); + return status; +} + +/** + * ice_create_recipe_group - creates recipe group + * @hw: pointer to hardware structure + * @rm: recipe management list entry + * @lkup_exts: lookup elements + */ +static enum ice_status +ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, + struct ice_prot_lkup_ext *lkup_exts) +{ + enum ice_status status; + u8 recp_count = 0; + + rm->n_grp_count = 0; + + /* Create recipes for words that are marked not done by packing them + * as best fit. + */ + status = ice_create_first_fit_recp_def(hw, lkup_exts, + &rm->rg_list, &recp_count); + if (!status) { + rm->n_grp_count += recp_count; + rm->n_ext_words = lkup_exts->n_val_words; + memcpy(&rm->ext_words, lkup_exts->fv_words, + sizeof(rm->ext_words)); + memcpy(rm->word_masks, lkup_exts->field_mask, + sizeof(rm->word_masks)); + } + + return status; +} + +/** + * ice_get_fv - get field vectors/extraction sequences for spec. lookup types + * @hw: pointer to hardware structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @bm: bitmap of field vectors to consider + * @fv_list: pointer to a list that holds the returned field vectors + */ +static enum ice_status +ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + unsigned long *bm, struct list_head *fv_list) +{ + enum ice_status status; + u8 *prot_ids; + u16 i; + + prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL); + if (!prot_ids) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < lkups_cnt; i++) + if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) { + status = ICE_ERR_CFG; + goto free_mem; + } + + /* Find field vectors that include all specified protocol types */ + status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list); + +free_mem: + kfree(prot_ids); + return status; +} + +/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule + * @hw: pointer to hardware structure + * @rinfo: other information regarding the rule e.g. priority and action info + * @bm: pointer to memory for returning the bitmap of field vectors + */ +static void +ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, + unsigned long *bm) +{ + bitmap_zero(bm, ICE_MAX_NUM_PROFILES); + + ice_get_sw_fv_bitmap(hw, ICE_PROF_NON_TUN, bm); +} + +/** + * ice_add_adv_recipe - Add an advanced recipe that is not part of the default + * @hw: pointer to hardware structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @rinfo: other information regarding the rule e.g. 
priority and action info + * @rid: return the recipe ID of the recipe created + */ +static enum ice_status +ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) +{ + DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES); + DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES); + struct ice_prot_lkup_ext *lkup_exts; + struct ice_recp_grp_entry *r_entry; + struct ice_sw_fv_list_entry *fvit; + struct ice_recp_grp_entry *r_tmp; + struct ice_sw_fv_list_entry *tmp; + enum ice_status status = 0; + struct ice_sw_recipe *rm; + u16 match_tun_mask = 0; + u8 i; + + if (!lkups_cnt) + return ICE_ERR_PARAM; + + lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL); + if (!lkup_exts) + return ICE_ERR_NO_MEMORY; + + /* Determine the number of words to be matched and if it exceeds a + * recipe's restrictions + */ + for (i = 0; i < lkups_cnt; i++) { + u16 count; + + if (lkups[i].type >= ICE_PROTOCOL_LAST) { + status = ICE_ERR_CFG; + goto err_free_lkup_exts; + } + + count = ice_fill_valid_words(&lkups[i], lkup_exts); + if (!count) { + status = ICE_ERR_CFG; + goto err_free_lkup_exts; + } + } + + rm = kzalloc(sizeof(*rm), GFP_KERNEL); + if (!rm) { + status = ICE_ERR_NO_MEMORY; + goto err_free_lkup_exts; + } + + /* Get field vectors that contain fields extracted from all the protocol + * headers being programmed. + */ + INIT_LIST_HEAD(&rm->fv_list); + INIT_LIST_HEAD(&rm->rg_list); + + /* Get bitmap of field vectors (profiles) that are compatible with the + * rule request; only these will be searched in the subsequent call to + * ice_get_fv. + */ + ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); + + status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list); + if (status) + goto err_unroll; + + /* Group match words into recipes using preferred recipe grouping + * criteria. + */ + status = ice_create_recipe_group(hw, rm, lkup_exts); + if (status) + goto err_unroll; + + /* set the recipe priority if specified */ + rm->priority = (u8)rinfo->priority; + + /* Find offsets from the field vector. Pick the first one for all the + * recipes. + */ + status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list); + if (status) + goto err_unroll; + + /* get bitmap of all profiles the recipe will be associated with */ + bitmap_zero(profiles, ICE_MAX_NUM_PROFILES); + list_for_each_entry(fvit, &rm->fv_list, list_entry) { + ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id); + set_bit((u16)fvit->profile_id, profiles); + } + + /* Look for a recipe which matches our requested fv / mask list */ + *rid = ice_find_recp(hw, lkup_exts); + if (*rid < ICE_MAX_NUM_RECIPES) + /* Success if found a recipe that match the existing criteria */ + goto err_unroll; + + /* Recipe we need does not exist, add a recipe */ + status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles); + if (status) + goto err_unroll; + + /* Associate all the recipes created with all the profiles in the + * common field vector. 
+ */ + list_for_each_entry(fvit, &rm->fv_list, list_entry) { + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + u16 j; + + status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, + (u8 *)r_bitmap, NULL); + if (status) + goto err_unroll; + + bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, + ICE_MAX_NUM_RECIPES); + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + goto err_unroll; + + status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, + (u8 *)r_bitmap, + NULL); + ice_release_change_lock(hw); + + if (status) + goto err_unroll; + + /* Update profile to recipe bitmap array */ + bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap, + ICE_MAX_NUM_RECIPES); + + /* Update recipe to profile bitmap array */ + for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES) + set_bit((u16)fvit->profile_id, recipe_to_profile[j]); + } + + *rid = rm->root_rid; + memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts, + sizeof(*lkup_exts)); +err_unroll: + list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) { + list_del(&r_entry->l_entry); + devm_kfree(ice_hw_to_dev(hw), r_entry); + } + + list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) { + list_del(&fvit->list_entry); + devm_kfree(ice_hw_to_dev(hw), fvit); + } + + if (rm->root_buf) + devm_kfree(ice_hw_to_dev(hw), rm->root_buf); + + kfree(rm); + +err_free_lkup_exts: + kfree(lkup_exts); + + return status; +} + +/** + * ice_find_dummy_packet - find dummy packet + * + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @pkt: dummy packet to fill according to filter match criteria + * @pkt_len: packet length of dummy packet + * @offsets: pointer to receive the pointer to the offsets for the packet + */ +static void +ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + const u8 **pkt, u16 *pkt_len, + const struct ice_dummy_pkt_offsets **offsets) +{ + bool tcp = false, udp = false, ipv6 = false, vlan = false; + u16 i; + + for (i = 0; i < lkups_cnt; i++) { + if (lkups[i].type == ICE_UDP_ILOS) + udp = true; + else if (lkups[i].type == ICE_TCP_IL) + tcp = true; + else if (lkups[i].type == ICE_IPV6_OFOS) + ipv6 = true; + else if (lkups[i].type == ICE_VLAN_OFOS) + vlan = true; + else if (lkups[i].type == ICE_ETYPE_OL && + lkups[i].h_u.ethertype.ethtype_id == + cpu_to_be16(ICE_IPV6_ETHER_ID) && + lkups[i].m_u.ethertype.ethtype_id == + cpu_to_be16(0xFFFF)) + ipv6 = true; + } + + if (udp && !ipv6) { + if (vlan) { + *pkt = dummy_vlan_udp_packet; + *pkt_len = sizeof(dummy_vlan_udp_packet); + *offsets = dummy_vlan_udp_packet_offsets; + return; + } + *pkt = dummy_udp_packet; + *pkt_len = sizeof(dummy_udp_packet); + *offsets = dummy_udp_packet_offsets; + return; + } else if (udp && ipv6) { + if (vlan) { + *pkt = dummy_vlan_udp_ipv6_packet; + *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet); + *offsets = dummy_vlan_udp_ipv6_packet_offsets; + return; + } + *pkt = dummy_udp_ipv6_packet; + *pkt_len = sizeof(dummy_udp_ipv6_packet); + *offsets = dummy_udp_ipv6_packet_offsets; + return; + } else if ((tcp && ipv6) || ipv6) { + if (vlan) { + *pkt = dummy_vlan_tcp_ipv6_packet; + *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet); + *offsets = dummy_vlan_tcp_ipv6_packet_offsets; + return; + } + *pkt = dummy_tcp_ipv6_packet; + *pkt_len = sizeof(dummy_tcp_ipv6_packet); + *offsets = dummy_tcp_ipv6_packet_offsets; + return; + } + + if (vlan) { + *pkt = dummy_vlan_tcp_packet; + *pkt_len = sizeof(dummy_vlan_tcp_packet); + *offsets = 
dummy_vlan_tcp_packet_offsets; + } else { + *pkt = dummy_tcp_packet; + *pkt_len = sizeof(dummy_tcp_packet); + *offsets = dummy_tcp_packet_offsets; + } +} + +/** + * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria + * + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @s_rule: stores rule information from the match criteria + * @dummy_pkt: dummy packet to fill according to filter match criteria + * @pkt_len: packet length of dummy packet + * @offsets: offset info for the dummy packet + */ +static enum ice_status +ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + struct ice_aqc_sw_rules_elem *s_rule, + const u8 *dummy_pkt, u16 pkt_len, + const struct ice_dummy_pkt_offsets *offsets) +{ + u8 *pkt; + u16 i; + + /* Start with a packet with a pre-defined/dummy content. Then, fill + * in the header values to be looked up or matched. + */ + pkt = s_rule->pdata.lkup_tx_rx.hdr; + + memcpy(pkt, dummy_pkt, pkt_len); + + for (i = 0; i < lkups_cnt; i++) { + enum ice_protocol_type type; + u16 offset = 0, len = 0, j; + bool found = false; + + /* find the start of this layer; it should be found since this + * was already checked when search for the dummy packet + */ + type = lkups[i].type; + for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) { + if (type == offsets[j].type) { + offset = offsets[j].offset; + found = true; + break; + } + } + /* this should never happen in a correct calling sequence */ + if (!found) + return ICE_ERR_PARAM; + + switch (lkups[i].type) { + case ICE_MAC_OFOS: + case ICE_MAC_IL: + len = sizeof(struct ice_ether_hdr); + break; + case ICE_ETYPE_OL: + len = sizeof(struct ice_ethtype_hdr); + break; + case ICE_VLAN_OFOS: + len = sizeof(struct ice_vlan_hdr); + break; + case ICE_IPV4_OFOS: + case ICE_IPV4_IL: + len = sizeof(struct ice_ipv4_hdr); + break; + case ICE_IPV6_OFOS: + case ICE_IPV6_IL: + len = sizeof(struct ice_ipv6_hdr); + break; + case ICE_TCP_IL: + case ICE_UDP_OF: + case ICE_UDP_ILOS: + len = sizeof(struct ice_l4_hdr); + break; + case ICE_SCTP_IL: + len = sizeof(struct ice_sctp_hdr); + break; + default: + return ICE_ERR_PARAM; + } + + /* the length should be a word multiple */ + if (len % ICE_BYTES_PER_WORD) + return ICE_ERR_CFG; + + /* We have the offset to the header start, the length, the + * caller's header values and mask. Use this information to + * copy the data into the dummy packet appropriately based on + * the mask. Note that we need to only write the bits as + * indicated by the mask to make sure we don't improperly write + * over any significant packet data. + */ + for (j = 0; j < len / sizeof(u16); j++) + if (((u16 *)&lkups[i].m_u)[j]) + ((u16 *)(pkt + offset))[j] = + (((u16 *)(pkt + offset))[j] & + ~((u16 *)&lkups[i].m_u)[j]) | + (((u16 *)&lkups[i].h_u)[j] & + ((u16 *)&lkups[i].m_u)[j]); + } + + s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len); + + return 0; +} + +/** + * ice_find_adv_rule_entry - Search a rule entry + * @hw: pointer to the hardware structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @recp_id: recipe ID for which we are finding the rule + * @rinfo: other information regarding the rule e.g. 
priority and action info + * + * Helper function to search for a given advance rule entry + * Returns pointer to entry storing the rule if found + */ +static struct ice_adv_fltr_mgmt_list_entry * +ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, u16 recp_id, + struct ice_adv_rule_info *rinfo) +{ + struct ice_adv_fltr_mgmt_list_entry *list_itr; + struct ice_switch_info *sw = hw->switch_info; + int i; + + list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules, + list_entry) { + bool lkups_matched = true; + + if (lkups_cnt != list_itr->lkups_cnt) + continue; + for (i = 0; i < list_itr->lkups_cnt; i++) + if (memcmp(&list_itr->lkups[i], &lkups[i], + sizeof(*lkups))) { + lkups_matched = false; + break; + } + if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag && + lkups_matched) + return list_itr; + } + return NULL; +} + +/** + * ice_adv_add_update_vsi_list + * @hw: pointer to the hardware structure + * @m_entry: pointer to current adv filter management list entry + * @cur_fltr: filter information from the book keeping entry + * @new_fltr: filter information with the new VSI to be added + * + * Call AQ command to add or update previously created VSI list with new VSI. + * + * Helper function to do book keeping associated with adding filter information + * The algorithm to do the booking keeping is described below : + * When a VSI needs to subscribe to a given advanced filter + * if only one VSI has been added till now + * Allocate a new VSI list and add two VSIs + * to this list using switch rule command + * Update the previously created switch rule with the + * newly created VSI list ID + * if a VSI list was previously created + * Add the new VSI to the previously created VSI list set + * using the update switch rule command + */ +static enum ice_status +ice_adv_add_update_vsi_list(struct ice_hw *hw, + struct ice_adv_fltr_mgmt_list_entry *m_entry, + struct ice_adv_rule_info *cur_fltr, + struct ice_adv_rule_info *new_fltr) +{ + enum ice_status status; + u16 vsi_list_id = 0; + + if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || + cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP || + cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET) + return ICE_ERR_NOT_IMPL; + + if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || + new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) && + (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI || + cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) + return ICE_ERR_NOT_IMPL; + + if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { + /* Only one entry existed in the mapping and it was not already + * a part of a VSI list. So, create a VSI list with the old and + * new VSIs. 
+ */ + struct ice_fltr_info tmp_fltr; + u16 vsi_handle_arr[2]; + + /* A rule already exists with the new VSI being added */ + if (cur_fltr->sw_act.fwd_id.hw_vsi_id == + new_fltr->sw_act.fwd_id.hw_vsi_id) + return ICE_ERR_ALREADY_EXISTS; + + vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; + vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; + status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, + &vsi_list_id, + ICE_SW_LKUP_LAST); + if (status) + return status; + + memset(&tmp_fltr, 0, sizeof(tmp_fltr)); + tmp_fltr.flag = m_entry->rule_info.sw_act.flag; + tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + tmp_fltr.lkup_type = ICE_SW_LKUP_LAST; + + /* Update the previous switch rule of "forward to VSI" to + * "fwd to VSI list" + */ + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); + if (status) + return status; + + cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id; + cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST; + m_entry->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, + vsi_list_id); + } else { + u16 vsi_handle = new_fltr->sw_act.vsi_handle; + + if (!m_entry->vsi_list_info) + return ICE_ERR_CFG; + + /* A rule already exists with the new VSI being added */ + if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) + return 0; + + /* Update the previously created VSI list set with + * the new VSI ID passed in + */ + vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id; + + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, + vsi_list_id, false, + ice_aqc_opc_update_sw_rules, + ICE_SW_LKUP_LAST); + /* update VSI list mapping info with new VSI ID */ + if (!status) + set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); + } + if (!status) + m_entry->vsi_count++; + return status; +} + +/** + * ice_add_adv_rule - helper function to create an advanced switch rule + * @hw: pointer to the hardware structure + * @lkups: information on the words that need to be looked up. All words + * together make one recipe + * @lkups_cnt: num of entries in the lkups array + * @rinfo: other information related to the rule that needs to be programmed + * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be + * ignored in case of error. + * + * This function can program only 1 rule at a time. The lkups array is used to + * describe all the words that form the "lookup" portion of the recipe. + * These words can span multiple protocols. Callers to this function need to + * pass in a list of protocol headers with lookup information along with a mask + * that determines which words are valid from the given protocol header. + * rinfo describes other information related to this rule such as forwarding + * IDs, priority of this rule, etc.
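To illustrate how a caller is expected to drive this interface, here is a minimal, hypothetical sketch (not part of this patch; the helper name and MAC value are made up, everything else uses only the structures and functions added in this series): forward frames with a given destination MAC to a VSI, then remove the rule again via the handle returned in added_entry.

	/* Hypothetical usage sketch, not part of this patch */
	static enum ice_status example_add_then_remove(struct ice_hw *hw, u16 vsi_handle)
	{
		static const u8 dst_mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		struct ice_rule_query_data added = { 0 };
		struct ice_adv_rule_info rinfo = { 0 };
		struct ice_adv_lkup_elem lkup = { 0 };
		enum ice_status status;

		/* one lookup element: match the full destination MAC */
		lkup.type = ICE_MAC_OFOS;
		ether_addr_copy(lkup.h_u.eth_hdr.dst_addr, dst_mac);
		eth_broadcast_addr(lkup.m_u.eth_hdr.dst_addr);

		/* forward matching Rx traffic to the given VSI */
		rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
		rinfo.sw_act.vsi_handle = vsi_handle;
		rinfo.sw_act.flag = ICE_FLTR_RX;
		rinfo.sw_act.src = hw->pf_id;
		rinfo.rx = true;
		rinfo.priority = 7;

		status = ice_add_adv_rule(hw, &lkup, 1, &rinfo, &added);
		if (status)
			return status;

		/* ... later, tear the rule down using the returned IDs ... */
		return ice_rem_adv_rule_by_id(hw, &added);
	}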
+ */ +enum ice_status +ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, + struct ice_rule_query_data *added_entry) +{ + struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL; + u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle; + const struct ice_dummy_pkt_offsets *pkt_offsets; + struct ice_aqc_sw_rules_elem *s_rule = NULL; + struct list_head *rule_head; + struct ice_switch_info *sw; + enum ice_status status; + const u8 *pkt = NULL; + u16 word_cnt; + u32 act = 0; + u8 q_rgn; + + /* Initialize profile to result index bitmap */ + if (!hw->switch_info->prof_res_bm_init) { + hw->switch_info->prof_res_bm_init = 1; + ice_init_prof_result_bm(hw); + } + + if (!lkups_cnt) + return ICE_ERR_PARAM; + + /* get # of words we need to match */ + word_cnt = 0; + for (i = 0; i < lkups_cnt; i++) { + u16 j, *ptr; + + ptr = (u16 *)&lkups[i].m_u; + for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++) + if (ptr[j] != 0) + word_cnt++; + } + + if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) + return ICE_ERR_PARAM; + + /* make sure that we can locate a dummy packet */ + ice_find_dummy_packet(lkups, lkups_cnt, &pkt, &pkt_len, + &pkt_offsets); + if (!pkt) { + status = ICE_ERR_PARAM; + goto err_ice_add_adv_rule; + } + + if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || + rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || + rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || + rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) + return ICE_ERR_CFG; + + vsi_handle = rinfo->sw_act.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) + rinfo->sw_act.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, vsi_handle); + if (rinfo->sw_act.flag & ICE_FLTR_TX) + rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle); + + status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid); + if (status) + return status; + m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); + if (m_entry) { + /* The rule already exists; add the new VSI to the rule's VSI + * list and increment vsi_count. If the VSI is already part of + * that list there is nothing more to do. If the rule currently + * forwards to a single VSI, a VSI list is created first from + * the existing VSI and the new VSI, and the rule is updated to + * forward to that list. + */ + status = ice_adv_add_update_vsi_list(hw, m_entry, + &m_entry->rule_info, + rinfo); + if (added_entry) { + added_entry->rid = rid; + added_entry->rule_id = m_entry->rule_info.fltr_rule_id; + added_entry->vsi_handle = rinfo->sw_act.vsi_handle; + } + return status; + } + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; + s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE; + switch (rinfo->sw_act.fltr_act) { + case ICE_FWD_TO_VSI: + act |= (rinfo->sw_act.fwd_id.hw_vsi_id << + ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M; + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; + break; + case ICE_FWD_TO_Q: + act |= ICE_SINGLE_ACT_TO_Q; + act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + break; + case ICE_FWD_TO_QGRP: + q_rgn = rinfo->sw_act.qgrp_size > 0 ?
+ (u8)ilog2(rinfo->sw_act.qgrp_size) : 0; + act |= ICE_SINGLE_ACT_TO_Q; + act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & + ICE_SINGLE_ACT_Q_REGION_M; + break; + case ICE_DROP_PACKET: + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | + ICE_SINGLE_ACT_VALID_BIT; + break; + default: + status = ICE_ERR_CFG; + goto err_ice_add_adv_rule; + } + + /* set the rule LOOKUP type based on caller specified 'Rx' + * instead of hardcoding it to be either LOOKUP_TX/RX + * + * for 'Rx' set the source to be the port number + * for 'Tx' set the source to be the source HW VSI number (determined + * by caller) + */ + if (rinfo->rx) { + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->pdata.lkup_tx_rx.src = + cpu_to_le16(hw->port_info->lport); + } else { + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); + s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src); + } + + s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid); + s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); + + status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, + pkt_len, pkt_offsets); + if (status) + goto err_ice_add_adv_rule; + + status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, + rule_buf_sz, 1, ice_aqc_opc_add_sw_rules, + NULL); + if (status) + goto err_ice_add_adv_rule; + adv_fltr = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(struct ice_adv_fltr_mgmt_list_entry), + GFP_KERNEL); + if (!adv_fltr) { + status = ICE_ERR_NO_MEMORY; + goto err_ice_add_adv_rule; + } + + adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups, + lkups_cnt * sizeof(*lkups), GFP_KERNEL); + if (!adv_fltr->lkups) { + status = ICE_ERR_NO_MEMORY; + goto err_ice_add_adv_rule; + } + + adv_fltr->lkups_cnt = lkups_cnt; + adv_fltr->rule_info = *rinfo; + adv_fltr->rule_info.fltr_rule_id = + le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); + sw = hw->switch_info; + sw->recp_list[rid].adv_rule = true; + rule_head = &sw->recp_list[rid].filt_rules; + + if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) + adv_fltr->vsi_count = 1; + + /* Add rule entry to book keeping list */ + list_add(&adv_fltr->list_entry, rule_head); + if (added_entry) { + added_entry->rid = rid; + added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id; + added_entry->vsi_handle = rinfo->sw_act.vsi_handle; + } +err_ice_add_adv_rule: + if (status && adv_fltr) { + devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups); + devm_kfree(ice_hw_to_dev(hw), adv_fltr); + } + + kfree(s_rule); + + return status; +} + /** * ice_replay_vsi_fltr - Replay filters for requested VSI * @hw: pointer to the hardware structure @@ -2831,6 +4944,229 @@ end: } /** + * ice_adv_rem_update_vsi_list + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle of the VSI to remove + * @fm_list: filter management entry for which the VSI list management needs to + * be done + */ +static enum ice_status +ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, + struct ice_adv_fltr_mgmt_list_entry *fm_list) +{ + struct ice_vsi_list_map_info *vsi_list_info; + enum ice_sw_lkup_type lkup_type; + enum ice_status status; + u16 vsi_list_id; + + if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST || + fm_list->vsi_count == 0) + return ICE_ERR_PARAM; + + /* A rule with the VSI being removed does not exist */ + if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) + return ICE_ERR_DOES_NOT_EXIST; + + lkup_type = ICE_SW_LKUP_LAST; + vsi_list_id = 
fm_list->rule_info.sw_act.fwd_id.vsi_list_id; + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + fm_list->vsi_count--; + clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); + vsi_list_info = fm_list->vsi_list_info; + if (fm_list->vsi_count == 1) { + struct ice_fltr_info tmp_fltr; + u16 rem_vsi_handle; + + rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, + ICE_MAX_VSI); + if (!ice_is_vsi_valid(hw, rem_vsi_handle)) + return ICE_ERR_OUT_OF_RANGE; + + /* Make sure VSI list is empty before removing it below */ + status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, + vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + memset(&tmp_fltr, 0, sizeof(tmp_fltr)); + tmp_fltr.flag = fm_list->rule_info.sw_act.flag; + tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id; + fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI; + tmp_fltr.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, rem_vsi_handle); + fm_list->rule_info.sw_act.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, rem_vsi_handle); + fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle; + + /* Update the previous switch rule of "forward to VSI list" + * back to "forward to VSI", since only one VSI remains + */ + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); + if (status) { + ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", + tmp_fltr.fwd_id.hw_vsi_id, status); + return status; + } + fm_list->vsi_list_info->ref_cnt--; + + /* Remove the VSI list since it is no longer used */ + status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); + if (status) { + ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n", + vsi_list_id, status); + return status; + } + + list_del(&vsi_list_info->list_entry); + devm_kfree(ice_hw_to_dev(hw), vsi_list_info); + fm_list->vsi_list_info = NULL; + } + + return status; +} + +/** + * ice_rem_adv_rule - removes existing advanced switch rule + * @hw: pointer to the hardware structure + * @lkups: information on the words that need to be looked up. All words + * together make one recipe + * @lkups_cnt: num of entries in the lkups array + * @rinfo: pointer to the rule information for the rule + * + * This function can be used to remove 1 rule at a time. The lkups array is + * used to describe all the words that form the "lookup" portion of the + * rule. These words can span multiple protocols. Callers to this function + * need to pass in a list of protocol headers with lookup information along + * with a mask that determines which words are valid from the given protocol + * header. rinfo describes other information related to this rule such as + * forwarding IDs, priority of this rule, etc.
+ */ +static enum ice_status +ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo) +{ + struct ice_adv_fltr_mgmt_list_entry *list_elem; + struct ice_prot_lkup_ext lkup_exts; + enum ice_status status = 0; + bool remove_rule = false; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + u16 i, rid, vsi_handle; + + memset(&lkup_exts, 0, sizeof(lkup_exts)); + for (i = 0; i < lkups_cnt; i++) { + u16 count; + + if (lkups[i].type >= ICE_PROTOCOL_LAST) + return ICE_ERR_CFG; + + count = ice_fill_valid_words(&lkups[i], &lkup_exts); + if (!count) + return ICE_ERR_CFG; + } + + rid = ice_find_recp(hw, &lkup_exts); + /* If did not find a recipe that match the existing criteria */ + if (rid == ICE_MAX_NUM_RECIPES) + return ICE_ERR_PARAM; + + rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock; + list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); + /* the rule is already removed */ + if (!list_elem) + return 0; + mutex_lock(rule_lock); + if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) { + remove_rule = true; + } else if (list_elem->vsi_count > 1) { + remove_rule = false; + vsi_handle = rinfo->sw_act.vsi_handle; + status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem); + } else { + vsi_handle = rinfo->sw_act.vsi_handle; + status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem); + if (status) { + mutex_unlock(rule_lock); + return status; + } + if (list_elem->vsi_count == 0) + remove_rule = true; + } + mutex_unlock(rule_lock); + if (remove_rule) { + struct ice_aqc_sw_rules_elem *s_rule; + u16 rule_buf_sz; + + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + s_rule->pdata.lkup_tx_rx.act = 0; + s_rule->pdata.lkup_tx_rx.index = + cpu_to_le16(list_elem->rule_info.fltr_rule_id); + s_rule->pdata.lkup_tx_rx.hdr_len = 0; + status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, + rule_buf_sz, 1, + ice_aqc_opc_remove_sw_rules, NULL); + if (!status || status == ICE_ERR_DOES_NOT_EXIST) { + struct ice_switch_info *sw = hw->switch_info; + + mutex_lock(rule_lock); + list_del(&list_elem->list_entry); + devm_kfree(ice_hw_to_dev(hw), list_elem->lkups); + devm_kfree(ice_hw_to_dev(hw), list_elem); + mutex_unlock(rule_lock); + if (list_empty(&sw->recp_list[rid].filt_rules)) + sw->recp_list[rid].adv_rule = false; + } + kfree(s_rule); + } + return status; +} + +/** + * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID + * @hw: pointer to the hardware structure + * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID + * + * This function is used to remove 1 rule at a time. The removal is based on + * the remove_entry parameter. 
This function will remove rule for a given + * vsi_handle with a given rule_id which is passed as parameter in remove_entry + */ +enum ice_status +ice_rem_adv_rule_by_id(struct ice_hw *hw, + struct ice_rule_query_data *remove_entry) +{ + struct ice_adv_fltr_mgmt_list_entry *list_itr; + struct list_head *list_head; + struct ice_adv_rule_info rinfo; + struct ice_switch_info *sw; + + sw = hw->switch_info; + if (!sw->recp_list[remove_entry->rid].recp_created) + return ICE_ERR_PARAM; + list_head = &sw->recp_list[remove_entry->rid].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { + if (list_itr->rule_info.fltr_rule_id == + remove_entry->rule_id) { + rinfo = list_itr->rule_info; + rinfo.sw_act.vsi_handle = remove_entry->vsi_handle; + return ice_rem_adv_rule(hw, list_itr->lkups, + list_itr->lkups_cnt, &rinfo); + } + } + /* either list is empty or unable to find rule */ + return ICE_ERR_DOES_NOT_EXIST; +} + +/** * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists * @hw: pointer to the hardware structure * @vsi_handle: driver VSI handle @@ -2868,12 +5204,15 @@ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) if (!sw) return; - for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { struct list_head *l_head; l_head = &sw->recp_list[i].filt_replay_rules; - ice_rem_sw_rule_info(hw, l_head); + if (!sw->recp_list[i].adv_rule) + ice_rem_sw_rule_info(hw, l_head); + else + ice_rem_adv_rule_info(hw, l_head); } } } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index c5db8d56133f..34b7f74b1ab8 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -14,6 +14,9 @@ #define ICE_VSI_INVAL_ID 0xffff #define ICE_INVAL_Q_HANDLE 0xFFFF +#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \ + (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr)) + /* VSI context structure for add/get/update/free operations */ struct ice_vsi_ctx { u16 vsi_num; @@ -122,30 +125,110 @@ struct ice_fltr_info { u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ }; +struct ice_adv_lkup_elem { + enum ice_protocol_type type; + union ice_prot_hdr h_u; /* Header values */ + union ice_prot_hdr m_u; /* Mask of header values to match */ +}; + +struct ice_sw_act_ctrl { + /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ + u16 src; + u16 flag; + enum ice_sw_fwd_act_type fltr_act; + /* Depending on filter action */ + union { + /* This is a queue ID in case of ICE_FWD_TO_Q and starting + * queue ID in case of ICE_FWD_TO_QGRP. 
+ */ + u16 q_id:11; + u16 vsi_id:10; + u16 hw_vsi_id:10; + u16 vsi_list_id:10; + } fwd_id; + /* software VSI handle */ + u16 vsi_handle; + u8 qgrp_size; +}; + +struct ice_rule_query_data { + /* Recipe ID for which the requested rule was added */ + u16 rid; + /* Rule ID that was added or is supposed to be removed */ + u16 rule_id; + /* vsi_handle for which Rule was added or is supposed to be removed */ + u16 vsi_handle; +}; + +struct ice_adv_rule_info { + struct ice_sw_act_ctrl sw_act; + u32 priority; + u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */ + u16 fltr_rule_id; +}; + +/* A collection of one or more four word recipe */ struct ice_sw_recipe { - struct list_head l_entry; + /* For a chained recipe the root recipe is what should be used for + * programming rules + */ + u8 is_root; + u8 root_rid; + u8 recp_created; - /* To protect modification of filt_rule list - * defined below + /* Number of extraction words */ + u8 n_ext_words; + /* Protocol ID and Offset pair (extraction word) to describe the + * recipe */ - struct mutex filt_rule_lock; + struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS]; + u16 word_masks[ICE_MAX_CHAIN_WORDS]; + + /* if this recipe is a collection of other recipe */ + u8 big_recp; - /* List of type ice_fltr_mgmt_list_entry */ + /* if this recipe is part of another bigger recipe then chain index + * corresponding to this recipe + */ + u8 chain_idx; + + /* if this recipe is a collection of other recipe then count of other + * recipes and recipe IDs of those recipes + */ + u8 n_grp_count; + + /* Bit map specifying the IDs associated with this group of recipe */ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + + /* List of type ice_fltr_mgmt_list_entry or adv_rule */ + u8 adv_rule; struct list_head filt_rules; struct list_head filt_replay_rules; - /* linked list of type recipe_list_entry */ - struct list_head rg_list; - /* linked list of type ice_sw_fv_list_entry*/ + struct mutex filt_rule_lock; /* protect filter rule structure */ + + /* Profiles this recipe should be associated with */ struct list_head fv_list; - struct ice_aqc_recipe_data_elem *r_buf; - u8 recp_count; - u8 root_rid; - u8 num_profs; - u8 *prof_ids; - /* recipe bitmap: what all recipes makes this recipe */ - DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + /* Profiles this recipe is associated with */ + u8 num_profs, *prof_ids; + + /* Bit map for possible result indexes */ + DECLARE_BITMAP(res_idxs, ICE_MAX_FV_WORDS); + + /* This allows user to specify the recipe priority. + * For now, this becomes 'fwd_priority' when recipe + * is created, usually recipes can have 'fwd' and 'join' + * priority. 
+ */ + u8 priority; + + struct list_head rg_list; + + /* AQ buffer associated with this recipe */ + struct ice_aqc_recipe_data_elem *root_buf; + /* This struct saves the fv_words for a given lookup */ + struct ice_prot_lkup_ext lkup_exts; }; /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */ @@ -183,6 +266,16 @@ struct ice_fltr_mgmt_list_entry { u8 counter_index; }; +struct ice_adv_fltr_mgmt_list_entry { + struct list_head list_entry; + + struct ice_adv_lkup_elem *lkups; + struct ice_adv_rule_info rule_info; + u16 lkups_cnt; + struct ice_vsi_list_map_info *vsi_list_info; + u16 vsi_count; +}; + enum ice_promisc_flags { ICE_PROMISC_UCAST_RX = 0x1, ICE_PROMISC_UCAST_TX = 0x2, @@ -218,6 +311,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id); /* Switch/bridge related commands */ +enum ice_status +ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, + struct ice_rule_query_data *added_entry); enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); @@ -245,10 +342,19 @@ enum ice_status ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc); +enum ice_status +ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); +enum ice_status +ice_rem_adv_rule_by_id(struct ice_hw *hw, + struct ice_rule_query_data *remove_entry); + enum ice_status ice_init_def_sw_recp(struct ice_hw *hw); u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); +enum ice_status +ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, + u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c new file mode 100644 index 000000000000..4c1daa1a02a1 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -0,0 +1,855 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_tc_lib.h" +#include "ice_lib.h" +#include "ice_fltr.h" + +/** + * ice_tc_count_lkups - determine lookup count for switch filter + * @flags: TC-flower flags + * @headers: Pointer to TC flower filter header structure + * @fltr: Pointer to outer TC filter structure + * + * Determine lookup count based on TC flower input for switch filter. + */ +static int +ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, + struct ice_tc_flower_fltr *fltr) +{ + int lkups_cnt = 0; + + if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) + lkups_cnt++; + + /* are MAC fields specified? */ + if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC)) + lkups_cnt++; + + /* is VLAN specified? */ + if (flags & ICE_TC_FLWR_FIELD_VLAN) + lkups_cnt++; + + /* are IPv[4|6] fields specified? */ + if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4)) + lkups_cnt++; + else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 | + ICE_TC_FLWR_FIELD_SRC_IPV6)) + lkups_cnt++; + + /* is L4 (TCP/UDP/any other L4 protocol fields) specified? 
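For instance, a filter matching only a destination MAC, an IPv4 destination address and a TCP destination port yields three lookups here (MAC, IPv4 and L4). The count computed by this function must line up with the number of elements that ice_tc_fill_rules() later writes; ice_eswitch_add_tc_fltr() treats any mismatch between the two as -EINVAL.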
*/ + if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT | + ICE_TC_FLWR_FIELD_SRC_L4_PORT)) + lkups_cnt++; + + return lkups_cnt; +} + +/** + * ice_tc_fill_rules - fill filter rules based on TC fltr + * @hw: pointer to HW structure + * @flags: tc flower field flags + * @tc_fltr: pointer to TC flower filter + * @list: list of advance rule elements + * @rule_info: pointer to information about rule + * @l4_proto: pointer to information such as L4 proto type + * + * Fill ice_adv_lkup_elem list based on TC flower flags and + * TC flower headers. This list should be used to add + * advance filter in hardware. + */ +static int +ice_tc_fill_rules(struct ice_hw *hw, u32 flags, + struct ice_tc_flower_fltr *tc_fltr, + struct ice_adv_lkup_elem *list, + struct ice_adv_rule_info *rule_info, + u16 *l4_proto) +{ + struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; + int i = 0; + + if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) { + list[i].type = ICE_ETYPE_OL; + list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto; + list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto; + i++; + } + + if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | + ICE_TC_FLWR_FIELD_SRC_MAC)) { + struct ice_tc_l2_hdr *l2_key, *l2_mask; + + l2_key = &headers->l2_key; + l2_mask = &headers->l2_mask; + + list[i].type = ICE_MAC_OFOS; + if (flags & ICE_TC_FLWR_FIELD_DST_MAC) { + ether_addr_copy(list[i].h_u.eth_hdr.dst_addr, + l2_key->dst_mac); + ether_addr_copy(list[i].m_u.eth_hdr.dst_addr, + l2_mask->dst_mac); + } + if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) { + ether_addr_copy(list[i].h_u.eth_hdr.src_addr, + l2_key->src_mac); + ether_addr_copy(list[i].m_u.eth_hdr.src_addr, + l2_mask->src_mac); + } + i++; + } + + /* copy VLAN info */ + if (flags & ICE_TC_FLWR_FIELD_VLAN) { + list[i].type = ICE_VLAN_OFOS; + list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id; + list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF); + i++; + } + + /* copy L3 (IPv[4|6]: src, dest) address */ + if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | + ICE_TC_FLWR_FIELD_SRC_IPV4)) { + struct ice_tc_l3_hdr *l3_key, *l3_mask; + + list[i].type = ICE_IPV4_OFOS; + l3_key = &headers->l3_key; + l3_mask = &headers->l3_mask; + if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) { + list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4; + list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4; + } + if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) { + list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4; + list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4; + } + i++; + } else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 | + ICE_TC_FLWR_FIELD_SRC_IPV6)) { + struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask; + struct ice_tc_l3_hdr *l3_key, *l3_mask; + + list[i].type = ICE_IPV6_OFOS; + ipv6_hdr = &list[i].h_u.ipv6_hdr; + ipv6_mask = &list[i].m_u.ipv6_hdr; + l3_key = &headers->l3_key; + l3_mask = &headers->l3_mask; + + if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) { + memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr, + sizeof(l3_key->dst_ipv6_addr)); + memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr, + sizeof(l3_mask->dst_ipv6_addr)); + } + if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) { + memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr, + sizeof(l3_key->src_ipv6_addr)); + memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr, + sizeof(l3_mask->src_ipv6_addr)); + } + i++; + } + + /* copy L4 (src, dest) port */ + if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT | + ICE_TC_FLWR_FIELD_SRC_L4_PORT)) { + struct ice_tc_l4_hdr *l4_key, *l4_mask; + + l4_key = &headers->l4_key; + l4_mask = &headers->l4_mask; + if (headers->l3_key.ip_proto == 
IPPROTO_TCP) { + list[i].type = ICE_TCP_IL; + /* detected L4 proto is TCP */ + if (l4_proto) + *l4_proto = IPPROTO_TCP; + } else if (headers->l3_key.ip_proto == IPPROTO_UDP) { + list[i].type = ICE_UDP_ILOS; + /* detected L4 proto is UDP */ + if (l4_proto) + *l4_proto = IPPROTO_UDP; + } + if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) { + list[i].h_u.l4_hdr.dst_port = l4_key->dst_port; + list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port; + } + if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) { + list[i].h_u.l4_hdr.src_port = l4_key->src_port; + list[i].m_u.l4_hdr.src_port = l4_mask->src_port; + } + i++; + } + + return i; +} + +static int +ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr, + struct flow_action_entry *act) +{ + struct ice_repr *repr; + + switch (act->id) { + case FLOW_ACTION_DROP: + fltr->action.fltr_act = ICE_DROP_PACKET; + break; + + case FLOW_ACTION_REDIRECT: + fltr->action.fltr_act = ICE_FWD_TO_VSI; + + if (ice_is_port_repr_netdev(act->dev)) { + repr = ice_netdev_to_repr(act->dev); + + fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; + } else if (netif_is_ice(act->dev)) { + struct ice_netdev_priv *np = netdev_priv(act->dev); + + fltr->dest_vsi = np->vsi; + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode"); + return -EINVAL; + } + + break; + + default: + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode"); + return -EINVAL; + } + + return 0; +} + +static int +ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) +{ + struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; + struct ice_adv_rule_info rule_info = { 0 }; + struct ice_rule_query_data rule_added; + struct ice_hw *hw = &vsi->back->hw; + struct ice_adv_lkup_elem *list; + u32 flags = fltr->flags; + enum ice_status status; + int lkups_cnt; + int ret = 0; + int i; + + if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 | + ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | + ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 | + ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | + ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)"); + return -EOPNOTSUPP; + } + + lkups_cnt = ice_tc_count_lkups(flags, headers, fltr); + list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); + if (!list) + return -ENOMEM; + + i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL); + if (i != lkups_cnt) { + ret = -EINVAL; + goto exit; + } + + rule_info.sw_act.fltr_act = fltr->action.fltr_act; + if (fltr->action.fltr_act != ICE_DROP_PACKET) + rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx; + /* For now, making priority to be highest, and it also becomes + * the priority for recipe which will get created as a result of + * new extraction sequence based on input set. + * Priority '7' is max val for switch recipe, higher the number + * results into order of switch rule evaluation. 
+ */ + rule_info.priority = 7; + + if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { + rule_info.sw_act.flag |= ICE_FLTR_RX; + rule_info.sw_act.src = hw->pf_id; + rule_info.rx = true; + } else { + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; + rule_info.rx = false; + } + + /* specify the cookie as filter_rule_id */ + rule_info.fltr_rule_id = fltr->cookie; + + status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); + if (status == ICE_ERR_ALREADY_EXISTS) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist"); + ret = -EINVAL; + goto exit; + } else if (status) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); + ret = -EIO; + goto exit; + } + + /* store the output params, which are needed later for removing + * advanced switch filter + */ + fltr->rid = rule_added.rid; + fltr->rule_id = rule_added.rule_id; + + if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { + if (ice_fltr_update_flags(vsi, fltr->rule_id, fltr->rid, + ICE_SINGLE_ACT_LAN_ENABLE)) + ice_rem_adv_rule_by_id(hw, &rule_added); + } + +exit: + kfree(list); + return ret; +} + +/** + * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter + * @match: Pointer to flow match structure + * @fltr: Pointer to filter structure + * @headers: inner or outer header fields + */ +static int +ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match, + struct ice_tc_flower_fltr *fltr, + struct ice_tc_flower_lyr_2_4_hdrs *headers) +{ + if (match->key->dst) { + fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4; + headers->l3_key.dst_ipv4 = match->key->dst; + headers->l3_mask.dst_ipv4 = match->mask->dst; + } + if (match->key->src) { + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4; + headers->l3_key.src_ipv4 = match->key->src; + headers->l3_mask.src_ipv4 = match->mask->src; + } + return 0; +} + +/** + * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter + * @match: Pointer to flow match structure + * @fltr: Pointer to filter structure + * @headers: inner or outer header fields + */ +static int +ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match, + struct ice_tc_flower_fltr *fltr, + struct ice_tc_flower_lyr_2_4_hdrs *headers) +{ + struct ice_tc_l3_hdr *l3_key, *l3_mask; + + /* src and dest IPV6 address should not be LOOPBACK + * (0:0:0:0:0:0:0:1), which can be represented as ::1 + */ + if (ipv6_addr_loopback(&match->key->dst) || + ipv6_addr_loopback(&match->key->src)) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK"); + return -EINVAL; + } + /* if src/dest IPv6 address is *,* error */ + if (ipv6_addr_any(&match->mask->dst) && + ipv6_addr_any(&match->mask->src)) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any"); + return -EINVAL; + } + if (!ipv6_addr_any(&match->mask->dst)) + fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6; + if (!ipv6_addr_any(&match->mask->src)) + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6; + + l3_key = &headers->l3_key; + l3_mask = &headers->l3_mask; + + if (fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6) { + memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr, + sizeof(match->key->src.s6_addr)); + memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr, + sizeof(match->mask->src.s6_addr)); + } + if (fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) { + memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr, + sizeof(match->key->dst.s6_addr)); + memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr, + sizeof(match->mask->dst.s6_addr)); + } + + return 0; +} + +/** + * ice_tc_set_port - Parse ports from TC 
flower filter + * @match: Flow match structure + * @fltr: Pointer to filter structure + * @headers: inner or outer header fields + */ +static int +ice_tc_set_port(struct flow_match_ports match, + struct ice_tc_flower_fltr *fltr, + struct ice_tc_flower_lyr_2_4_hdrs *headers) +{ + if (match.key->dst) { + fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT; + headers->l4_key.dst_port = match.key->dst; + headers->l4_mask.dst_port = match.mask->dst; + } + if (match.key->src) { + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; + headers->l4_key.src_port = match.key->src; + headers->l4_mask.src_port = match.mask->src; + } + return 0; +} + +/** + * ice_parse_cls_flower - Parse TC flower filters provided by kernel + * @vsi: Pointer to the VSI + * @filter_dev: Pointer to device on which filter is being added + * @f: Pointer to struct flow_cls_offload + * @fltr: Pointer to filter structure + */ +static int +ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, + struct flow_cls_offload *f, + struct ice_tc_flower_fltr *fltr) +{ + struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; + struct flow_dissector *dissector; + + dissector = rule->match.dissector; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used"); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + + n_proto_key = ntohs(match.key->n_proto); + n_proto_mask = ntohs(match.mask->n_proto); + + if (n_proto_key == ETH_P_ALL || n_proto_key == 0) { + n_proto_key = 0; + n_proto_mask = 0; + } else { + fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID; + } + + headers->l2_key.n_proto = cpu_to_be16(n_proto_key); + headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask); + headers->l3_key.ip_proto = match.key->ip_proto; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + + if (!is_zero_ether_addr(match.key->dst)) { + ether_addr_copy(headers->l2_key.dst_mac, + match.key->dst); + ether_addr_copy(headers->l2_mask.dst_mac, + match.mask->dst); + fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC; + } + + if (!is_zero_ether_addr(match.key->src)) { + ether_addr_copy(headers->l2_key.src_mac, + match.key->src); + ether_addr_copy(headers->l2_mask.src_mac, + match.mask->src); + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) || + is_vlan_dev(filter_dev)) { + struct flow_dissector_key_vlan mask; + struct flow_dissector_key_vlan key; + struct flow_match_vlan match; + + if (is_vlan_dev(filter_dev)) { + match.key = &key; + match.key->vlan_id = vlan_dev_vlan_id(filter_dev); + match.key->vlan_priority = 0; + match.mask = &mask; + memset(match.mask, 0xff, sizeof(*match.mask)); + match.mask->vlan_priority = 0; + } else { + flow_rule_match_vlan(rule, &match); + } + + if (match.mask->vlan_id) { + if (match.mask->vlan_id == VLAN_VID_MASK) { + fltr->flags |= ICE_TC_FLWR_FIELD_VLAN; + } else { + 
NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask"); + return -EINVAL; + } + } + + headers->vlan_hdr.vlan_id = + cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK); + if (match.mask->vlan_priority) + headers->vlan_hdr.vlan_prio = match.key->vlan_priority; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + + addr_type = match.key->addr_type; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + if (ice_tc_set_ipv4(&match, fltr, headers)) + return -EINVAL; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); + if (ice_tc_set_ipv6(&match, fltr, headers)) + return -EINVAL; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + if (ice_tc_set_port(match, fltr, headers)) + return -EINVAL; + switch (headers->l3_key.ip_proto) { + case IPPROTO_TCP: + case IPPROTO_UDP: + break; + default: + NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported"); + return -EINVAL; + } + } + return 0; +} + +/** + * ice_add_switch_fltr - Add TC flower filters + * @vsi: Pointer to VSI + * @fltr: Pointer to struct ice_tc_flower_fltr + * + * Add filter in HW switch block + */ +static int +ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) +{ + if (ice_is_eswitch_mode_switchdev(vsi->back)) + return ice_eswitch_add_tc_fltr(vsi, fltr); + + return -EOPNOTSUPP; +} + +/** + * ice_handle_tclass_action - Support directing to a traffic class + * @vsi: Pointer to VSI + * @cls_flower: Pointer to TC flower offload structure + * @fltr: Pointer to TC flower filter structure + * + * Support directing traffic to a traffic class + */ +static int +ice_handle_tclass_action(struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, + struct ice_tc_flower_fltr *fltr) +{ + int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); + + if (tc < 0) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid"); + return -EINVAL; + } + if (!tc) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination"); + return -EINVAL; + } + + if (!(vsi->tc_cfg.ena_tc & BIT(tc))) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existence destination"); + return -EINVAL; + } + + /* Redirect to a TC class or Queue Group */ + fltr->action.fltr_act = ICE_FWD_TO_QGRP; + fltr->action.tc_class = tc; + + return 0; +} + +/** + * ice_parse_tc_flower_actions - Parse the actions for a TC filter + * @vsi: Pointer to VSI + * @cls_flower: Pointer to TC flower offload structure + * @fltr: Pointer to TC flower filter structure + * + * Parse the actions for a TC filter + */ +static int +ice_parse_tc_flower_actions(struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, + struct ice_tc_flower_fltr *fltr) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); + struct flow_action *flow_action = &rule->action; + struct flow_action_entry *act; + int i; + + if (cls_flower->classid) + return ice_handle_tclass_action(vsi, cls_flower, fltr); + + if (!flow_action_has_entries(flow_action)) + return -EINVAL; + + flow_action_for_each(i, act, flow_action) { + if (ice_is_eswitch_mode_switchdev(vsi->back)) { + int err = ice_eswitch_tc_parse_action(fltr, act); + + if (err) + 
return err; + continue; + } + /* Allow only one rule per filter */ + + /* Drop action */ + if (act->id == FLOW_ACTION_DROP) { + fltr->action.fltr_act = ICE_DROP_PACKET; + return 0; + } + fltr->action.fltr_act = ICE_FWD_TO_VSI; + } + return 0; +} + +/** + * ice_del_tc_fltr - deletes a filter from HW table + * @vsi: Pointer to VSI + * @fltr: Pointer to struct ice_tc_flower_fltr + * + * This function deletes a filter from HW table and manages book-keeping + */ +static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) +{ + struct ice_rule_query_data rule_rem; + struct ice_pf *pf = vsi->back; + int err; + + rule_rem.rid = fltr->rid; + rule_rem.rule_id = fltr->rule_id; + rule_rem.vsi_handle = fltr->dest_id; + err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem); + if (err) { + if (err == ICE_ERR_DOES_NOT_EXIST) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist"); + return -ENOENT; + } + NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter"); + return -EIO; + } + + return 0; +} + +/** + * ice_add_tc_fltr - adds a TC flower filter + * @netdev: Pointer to netdev + * @vsi: Pointer to VSI + * @f: Pointer to flower offload structure + * @__fltr: Pointer to struct ice_tc_flower_fltr + * + * This function parses TC-flower input fields, parses action, + * and adds a filter. + */ +static int +ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *f, + struct ice_tc_flower_fltr **__fltr) +{ + struct ice_tc_flower_fltr *fltr; + int err; + + /* by default, set output to be INVALID */ + *__fltr = NULL; + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) + return -ENOMEM; + + fltr->cookie = f->cookie; + fltr->extack = f->common.extack; + fltr->src_vsi = vsi; + INIT_HLIST_NODE(&fltr->tc_flower_node); + + err = ice_parse_cls_flower(netdev, vsi, f, fltr); + if (err < 0) + goto err; + + err = ice_parse_tc_flower_actions(vsi, f, fltr); + if (err < 0) + goto err; + + err = ice_add_switch_fltr(vsi, fltr); + if (err < 0) + goto err; + + /* return the newly created filter */ + *__fltr = fltr; + + return 0; +err: + kfree(fltr); + return err; +} + +/** + * ice_find_tc_flower_fltr - Find the TC flower filter in the list + * @pf: Pointer to PF + * @cookie: filter specific cookie + */ +static struct ice_tc_flower_fltr * +ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie) +{ + struct ice_tc_flower_fltr *fltr; + + hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) + if (cookie == fltr->cookie) + return fltr; + + return NULL; +} + +/** + * ice_add_cls_flower - add TC flower filters + * @netdev: Pointer to filter device + * @vsi: Pointer to VSI + * @cls_flower: Pointer to flower offload structure + */ +int +ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower) +{ + struct netlink_ext_ack *extack = cls_flower->common.extack; + struct net_device *vsi_netdev = vsi->netdev; + struct ice_tc_flower_fltr *fltr; + struct ice_pf *pf = vsi->back; + int err; + + if (ice_is_reset_in_progress(pf->state)) + return -EBUSY; + if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) + return -EINVAL; + + if (ice_is_port_repr_netdev(netdev)) + vsi_netdev = netdev; + + if (!(vsi_netdev->features & NETIF_F_HW_TC) && + !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) { + /* Based on TC indirect notifications from kernel, all ice + * devices get an instance of rule from higher level device. + * Avoid triggering explicit error in this case. 
+ */ + if (netdev == vsi_netdev) + NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again"); + return -EINVAL; + } + + /* avoid duplicate entries, if exists - return error */ + fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie); + if (fltr) { + NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring"); + return -EEXIST; + } + + /* prep and add TC-flower filter in HW */ + err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr); + if (err) + return err; + + /* add filter into an ordered list */ + hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list); + return 0; +} + +/** + * ice_del_cls_flower - delete TC flower filters + * @vsi: Pointer to VSI + * @cls_flower: Pointer to struct flow_cls_offload + */ +int +ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower) +{ + struct ice_tc_flower_fltr *fltr; + struct ice_pf *pf = vsi->back; + int err; + + /* find filter */ + fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie); + if (!fltr) { + if (hlist_empty(&pf->tc_flower_fltr_list)) + return 0; + + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it"); + return -EINVAL; + } + + fltr->extack = cls_flower->common.extack; + /* delete filter from HW */ + err = ice_del_tc_fltr(vsi, fltr); + if (err) + return err; + + /* delete filter from an ordered list */ + hlist_del(&fltr->tc_flower_node); + + /* free the filter node */ + kfree(fltr); + + return 0; +} + +/** + * ice_replay_tc_fltrs - replay TC filters + * @pf: pointer to PF struct + */ +void ice_replay_tc_fltrs(struct ice_pf *pf) +{ + struct ice_tc_flower_fltr *fltr; + struct hlist_node *node; + + hlist_for_each_entry_safe(fltr, node, + &pf->tc_flower_fltr_list, + tc_flower_node) { + fltr->extack = NULL; + ice_add_switch_fltr(fltr->src_vsi, fltr); + } +} diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h new file mode 100644 index 000000000000..d90e9e37ae25 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2019-2021, Intel Corporation. 
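/* Editor's illustrative sketch, not part of the patch: how a flow block
 * callback in the driver's ndo_setup_tc path might dispatch flower
 * commands to the two entry points added above.  The wrapper name is an
 * assumption; only ice_add_cls_flower()/ice_del_cls_flower() and the
 * standard FLOW_CLS_* commands are taken from the patch and core API. */
static int example_setup_tc_cls_flower(struct ice_vsi *vsi,
					struct net_device *filter_dev,
					struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(vsi, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}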
*/ + +#ifndef _ICE_TC_LIB_H_ +#define _ICE_TC_LIB_H_ + +#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0) +#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1) +#define ICE_TC_FLWR_FIELD_VLAN BIT(2) +#define ICE_TC_FLWR_FIELD_DEST_IPV4 BIT(3) +#define ICE_TC_FLWR_FIELD_SRC_IPV4 BIT(4) +#define ICE_TC_FLWR_FIELD_DEST_IPV6 BIT(5) +#define ICE_TC_FLWR_FIELD_SRC_IPV6 BIT(6) +#define ICE_TC_FLWR_FIELD_DEST_L4_PORT BIT(7) +#define ICE_TC_FLWR_FIELD_SRC_L4_PORT BIT(8) +#define ICE_TC_FLWR_FIELD_TENANT_ID BIT(9) +#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 BIT(10) +#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 BIT(11) +#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 BIT(12) +#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 BIT(13) +#define ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT BIT(14) +#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15) +#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16) +#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17) + +struct ice_tc_flower_action { + u32 tc_class; + enum ice_sw_fwd_act_type fltr_act; +}; + +struct ice_tc_vlan_hdr { + __be16 vlan_id; /* Only last 12 bits valid */ + u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */ +}; + +struct ice_tc_l2_hdr { + u8 dst_mac[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + __be16 n_proto; /* Ethernet Protocol */ +}; + +struct ice_tc_l3_hdr { + u8 ip_proto; /* IPPROTO value */ + union { + struct { + struct in_addr dst_ip; + struct in_addr src_ip; + } v4; + struct { + struct in6_addr dst_ip6; + struct in6_addr src_ip6; + } v6; + } ip; +#define dst_ipv6 ip.v6.dst_ip6.s6_addr32 +#define dst_ipv6_addr ip.v6.dst_ip6.s6_addr +#define src_ipv6 ip.v6.src_ip6.s6_addr32 +#define src_ipv6_addr ip.v6.src_ip6.s6_addr +#define dst_ipv4 ip.v4.dst_ip.s_addr +#define src_ipv4 ip.v4.src_ip.s_addr + + u8 tos; + u8 ttl; +}; + +struct ice_tc_l4_hdr { + __be16 dst_port; + __be16 src_port; +}; + +struct ice_tc_flower_lyr_2_4_hdrs { + /* L2 layer fields with their mask */ + struct ice_tc_l2_hdr l2_key; + struct ice_tc_l2_hdr l2_mask; + struct ice_tc_vlan_hdr vlan_hdr; + /* L3 (IPv4[6]) layer fields with their mask */ + struct ice_tc_l3_hdr l3_key; + struct ice_tc_l3_hdr l3_mask; + + /* L4 layer fields with their mask */ + struct ice_tc_l4_hdr l4_key; + struct ice_tc_l4_hdr l4_mask; +}; + +enum ice_eswitch_fltr_direction { + ICE_ESWITCH_FLTR_INGRESS, + ICE_ESWITCH_FLTR_EGRESS, +}; + +struct ice_tc_flower_fltr { + struct hlist_node tc_flower_node; + + /* cookie becomes filter_rule_id if rule is added successfully */ + unsigned long cookie; + + /* add_adv_rule returns information like recipe ID, rule_id. 
Store + * those values since they are needed to remove advanced rule + */ + u16 rid; + u16 rule_id; + /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon + * destination type + */ + u16 dest_id; + /* if dest_id is vsi_idx, then need to store destination VSI ptr */ + struct ice_vsi *dest_vsi; + /* direction of fltr for eswitch use case */ + enum ice_eswitch_fltr_direction direction; + + /* Parsed TC flower configuration params */ + struct ice_tc_flower_lyr_2_4_hdrs outer_headers; + struct ice_tc_flower_lyr_2_4_hdrs inner_headers; + struct ice_vsi *src_vsi; + __be32 tenant_id; + u32 flags; + struct ice_tc_flower_action action; + + /* cache ptr which is used wherever needed to communicate netlink + * messages + */ + struct netlink_ext_ack *extack; +}; + +int +ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower); +int +ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower); +void ice_replay_tc_fltrs(struct ice_pf *pf); + +#endif /* _ICE_TC_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 6ee8e0032d52..4da9420df1b0 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -6,6 +6,7 @@ #include <linux/prefetch.h> #include <linux/mm.h> #include <linux/bpf_trace.h> +#include <net/dsfield.h> #include <net/xdp.h> #include "ice_txrx_lib.h" #include "ice_lib.h" @@ -13,6 +14,7 @@ #include "ice_trace.h" #include "ice_dcb_lib.h" #include "ice_xsk.h" +#include "ice_eswitch.h" #define ICE_RX_HDR_SIZE 256 @@ -2245,6 +2247,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) ICE_TXD_CTX_QW1_CMD_S); ice_tstamp(tx_ring, skb, first, &offload); + if (ice_is_switchdev_running(vsi->back)) + ice_eswitch_set_target_vsi(skb, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { struct ice_tx_ctx_desc *cdesc; @@ -2296,6 +2300,39 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) } /** + * ice_get_dscp_up - return the UP/TC value for a SKB + * @dcbcfg: DCB config that contains DSCP to UP/TC mapping + * @skb: SKB to query for info to determine UP/TC + * + * This function is to only be called when the PF is in L3 DSCP PFC mode + */ +static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb) +{ + u8 dscp = 0; + + if (skb->protocol == htons(ETH_P_IP)) + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + else if (skb->protocol == htons(ETH_P_IPV6)) + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + + return dcbcfg->dscp_map[dscp]; +} + +u16 +ice_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *dcbcfg; + + dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) + skb->priority = ice_get_dscp_up(dcbcfg, skb); + + return netdev_pick_tx(netdev, skb, sb_dev); +} + +/** * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue * @tx_ring: tx_ring to clean */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 1e46e80f3d6f..cce348c83da8 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -164,17 +164,10 @@ struct ice_tx_offload_params { }; struct ice_rx_buf { - union { - struct { - dma_addr_t dma; - struct page *page; - unsigned int page_offset; - u16 pagecnt_bias; - }; - struct { - struct xdp_buff 
*xdp; - }; - }; + dma_addr_t dma; + struct page *page; + unsigned int page_offset; + u16 pagecnt_bias; }; struct ice_q_stats { @@ -270,6 +263,7 @@ struct ice_ring { union { struct ice_tx_buf *tx_buf; struct ice_rx_buf *rx_buf; + struct xdp_buff **xdp_buf; }; /* CL2 - 2nd cacheline starts here */ u16 q_index; /* Queue number of ring */ @@ -378,6 +372,9 @@ union ice_32b_rx_flex_desc; bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count); netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); +u16 +ice_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev); void ice_clean_tx_ring(struct ice_ring *tx_ring); void ice_clean_rx_ring(struct ice_ring *rx_ring); int ice_setup_tx_ring(struct ice_ring *tx_ring); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 171397dcf00a..e314a1aee0ff 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. */ #include "ice_txrx_lib.h" +#include "ice_eswitch.h" /** * ice_release_rx_desc - Store the new tail and head values @@ -185,7 +186,8 @@ ice_process_skb_fields(struct ice_ring *rx_ring, ice_rx_hash(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev + (rx_ring, rx_desc)); ice_rx_csum(rx_ring, skb, rx_desc, ptype); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index d33d1906103c..d5cb1c5a89c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -139,6 +139,7 @@ enum ice_vsi_type { ICE_VSI_VF = 1, ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */ ICE_VSI_LB = 6, + ICE_VSI_SWITCHDEV_CTRL = 7, }; struct ice_link_status { @@ -604,7 +605,8 @@ struct ice_dcb_app_priority_table { }; #define ICE_MAX_USER_PRIORITY 8 -#define ICE_DCBX_MAX_APPS 32 +#define ICE_DCBX_MAX_APPS 64 +#define ICE_DSCP_NUM_VAL 64 #define ICE_LLDPDU_SIZE 1500 #define ICE_TLV_STATUS_OPER 0x1 #define ICE_TLV_STATUS_SYNC 0x2 @@ -622,7 +624,14 @@ struct ice_dcbx_cfg { struct ice_dcb_ets_cfg etscfg; struct ice_dcb_ets_cfg etsrec; struct ice_dcb_pfc_cfg pfc; +#define ICE_QOS_MODE_VLAN 0x0 +#define ICE_QOS_MODE_DSCP 0x1 + u8 pfc_mode; struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS]; + /* when DSCP mapping defined by user set its bit to 1 */ + DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL); + /* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */ + u8 dscp_map[ICE_DSCP_NUM_VAL]; u8 dcbx_mode; #define ICE_DCBX_MODE_CEE 0x1 #define ICE_DCBX_MODE_IEEE 0x2 @@ -668,6 +677,10 @@ struct ice_port_info { struct ice_switch_info { struct list_head vsi_list_map_head; struct ice_sw_recipe *recp_list; + u16 prof_res_bm_init; + u16 max_used_prof_index; + + DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); }; /* FW logging configuration */ @@ -903,6 +916,7 @@ struct ice_hw { struct mutex rss_locks; /* protect RSS configuration */ struct list_head rss_list_head; struct ice_mbx_snapshot mbx_snapshot; + u16 io_expander_handle; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index e93430ab37f1..4d0b643906ff 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -6,6 +6,7 @@ #include "ice_lib.h" #include "ice_fltr.h" #include "ice_flow.h" +#include "ice_eswitch.h" #include "ice_virtchnl_allowlist.h" #define FIELD_SELECTOR(proto_hdr_field) \ @@ -251,7 +252,7 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = { * ice_get_vf_vsi - get VF's VSI based on the stored index * @vf: VF used to get VSI */ -static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) +struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) { return vf->pf->vsi[vf->lan_vsi_idx]; } @@ -412,7 +413,7 @@ static bool ice_is_vf_link_up(struct ice_vf *vf) * * send a link status message to a single VF */ -static void ice_vc_notify_vf_link_state(struct ice_vf *vf) +void ice_vc_notify_vf_link_state(struct ice_vf *vf) { struct virtchnl_pf_event pfe = { 0 }; struct ice_hw *hw = &vf->pf->hw; @@ -620,6 +621,8 @@ void ice_free_vfs(struct ice_pf *pf) if (!pf->vf) return; + ice_eswitch_release(pf); + while (test_and_set_bit(ICE_VF_DIS, pf->state)) usleep_range(1000, 2000); @@ -932,6 +935,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) enum ice_status status; u8 broadcast[ETH_ALEN]; + if (ice_is_eswitch_mode_switchdev(vf->pf)) + return 0; + eth_broadcast_addr(broadcast); status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); if (status) { @@ -1581,6 +1587,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_vf_post_vsi_rebuild(vf); } + if (ice_is_eswitch_mode_switchdev(pf)) + if (ice_eswitch_rebuild(pf)) + dev_warn(dev, "eswitch rebuild failed\n"); + ice_flush(hw); clear_bit(ICE_VF_DIS, pf->state); @@ -1593,7 +1603,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) * * Returns true if the PF or VF is disabled, false otherwise. */ -static bool ice_is_vf_disabled(struct ice_vf *vf) +bool ice_is_vf_disabled(struct ice_vf *vf) { struct ice_pf *pf = vf->pf; @@ -1711,6 +1721,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) } ice_vf_post_vsi_rebuild(vf); + vsi = ice_get_vf_vsi(vf); + ice_eswitch_update_repr(vsi); /* if the VF has been reset allow it to come up again */ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id)) @@ -1894,6 +1906,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) */ ice_vf_ctrl_invalidate_vsi(vf); ice_vf_fdir_init(vf); + + ice_vc_set_dflt_vf_ops(&vf->vc_ops); } } @@ -1960,6 +1974,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) } clear_bit(ICE_VF_DIS, pf->state); + + if (ice_eswitch_configure(pf)) + goto err_unroll_sriov; + return 0; err_unroll_sriov: @@ -2823,7 +2841,7 @@ static void ice_wait_on_vf_reset(struct ice_vf *vf) * disabled, and initialized so it can be configured and/or queried by a host * administrator. 
*/ -static int ice_check_vf_ready_for_cfg(struct ice_vf *vf) +int ice_check_vf_ready_for_cfg(struct ice_vf *vf) { struct ice_pf *pf; @@ -3802,6 +3820,26 @@ static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac) } /** + * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF + * @vf: VF to update + * @vc_ether_addr: structure from VIRTCHNL with MAC to check + * + * only update cached hardware MAC for legacy VF drivers on delete + * because we cannot guarantee order/type of MAC from the VF driver + */ +static void +ice_update_legacy_cached_mac(struct ice_vf *vf, + struct virtchnl_ether_addr *vc_ether_addr) +{ + if (!ice_is_vc_addr_legacy(vc_ether_addr) || + ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) + return; + + ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr); + ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr); +} + +/** * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed * @vf: VF to update * @vc_ether_addr: structure from VIRTCHNL with MAC to delete @@ -3822,16 +3860,7 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) */ eth_zero_addr(vf->dev_lan_addr.addr); - /* only update cached hardware MAC for legacy VF drivers on delete - * because we cannot guarantee order/type of MAC from the VF driver - */ - if (ice_is_vc_addr_legacy(vc_ether_addr) && - !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) { - ether_addr_copy(vf->dev_lan_addr.addr, - vf->legacy_last_added_umac.addr); - ether_addr_copy(vf->hw_lan_addr.addr, - vf->legacy_last_added_umac.addr); - } + ice_update_legacy_cached_mac(vf, vc_ether_addr); } /** @@ -4400,6 +4429,133 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf) return ice_vsi_manage_vlan_stripping(vsi, false); } +static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = { + .get_ver_msg = ice_vc_get_ver_msg, + .get_vf_res_msg = ice_vc_get_vf_res_msg, + .reset_vf = ice_vc_reset_vf_msg, + .add_mac_addr_msg = ice_vc_add_mac_addr_msg, + .del_mac_addr_msg = ice_vc_del_mac_addr_msg, + .cfg_qs_msg = ice_vc_cfg_qs_msg, + .ena_qs_msg = ice_vc_ena_qs_msg, + .dis_qs_msg = ice_vc_dis_qs_msg, + .request_qs_msg = ice_vc_request_qs_msg, + .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, + .config_rss_key = ice_vc_config_rss_key, + .config_rss_lut = ice_vc_config_rss_lut, + .get_stats_msg = ice_vc_get_stats_msg, + .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg, + .add_vlan_msg = ice_vc_add_vlan_msg, + .remove_vlan_msg = ice_vc_remove_vlan_msg, + .ena_vlan_stripping = ice_vc_ena_vlan_stripping, + .dis_vlan_stripping = ice_vc_dis_vlan_stripping, + .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, + .add_fdir_fltr_msg = ice_vc_add_fdir_fltr, + .del_fdir_fltr_msg = ice_vc_del_fdir_fltr, +}; + +void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) +{ + *ops = ice_vc_vf_dflt_ops; +} + +static int +ice_vc_repr_no_action_msg(struct ice_vf __always_unused *vf, + u8 __always_unused *msg) +{ + return 0; +} + +/** + * ice_vc_repr_add_mac + * @vf: pointer to VF + * @msg: virtchannel message + * + * When port representors are created, we do not add MAC rule + * to firmware, we store it so that PF could report same + * MAC as VF. 
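/* Editor's illustrative sketch, not part of the patch: the per-VF
 * ice_vc_vf_ops table turns switchdev support into a handler swap rather
 * than mode checks scattered through the virtchnl dispatcher.  The loop
 * below is an assumption of how an eswitch mode change might walk the
 * VFs; pf->num_alloc_vfs is assumed to be the allocated VF count. */
static void example_switch_vc_ops(struct ice_pf *pf, bool switchdev)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		struct ice_vf *vf = &pf->vf[i];

		if (switchdev)
			ice_vc_change_ops_to_repr(&vf->vc_ops);
		else
			ice_vc_set_dflt_vf_ops(&vf->vc_ops);
	}
}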
+ */ +static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; + struct ice_vsi *vsi; + struct ice_pf *pf; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || + !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } + + pf = vf->pf; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } + + for (i = 0; i < al->num_elements; i++) { + u8 *mac_addr = al->list[i].addr; + + if (!is_unicast_ether_addr(mac_addr) || + ether_addr_equal(mac_addr, vf->hw_lan_addr.addr)) + continue; + + if (vf->pf_set_mac) { + dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n"); + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + goto handle_mac_exit; + } + + ice_vfhw_mac_add(vf, &al->list[i]); + vf->num_mac++; + break; + } + +handle_mac_exit: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + v_ret, NULL, 0); +} + +/** + * ice_vc_repr_del_mac - response with success for deleting MAC + * @vf: pointer to VF + * @msg: virtchannel message + * + * Respond with success to not break normal VF flow. + * For legacy VF driver try to update cached MAC address. + */ +static int +ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg) +{ + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; + + ice_update_legacy_cached_mac(vf, &al->list[0]); + + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ice_vc_repr_no_action(struct ice_vf __always_unused *vf) +{ + return 0; +} + +void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) +{ + ops->add_mac_addr_msg = ice_vc_repr_add_mac; + ops->del_mac_addr_msg = ice_vc_repr_del_mac; + ops->add_vlan_msg = ice_vc_repr_no_action_msg; + ops->remove_vlan_msg = ice_vc_repr_no_action_msg; + ops->ena_vlan_stripping = ice_vc_repr_no_action; + ops->dis_vlan_stripping = ice_vc_repr_no_action; + ops->cfg_promiscuous_mode_msg = ice_vc_repr_no_action_msg; +} + /** * ice_vc_process_vf_msg - Process request from VF * @pf: pointer to the PF structure @@ -4413,6 +4569,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) u32 v_opcode = le32_to_cpu(event->desc.cookie_high); s16 vf_id = le16_to_cpu(event->desc.retval); u16 msglen = event->msg_len; + struct ice_vc_vf_ops *ops; u8 *msg = event->msg_buf; struct ice_vf *vf = NULL; struct device *dev; @@ -4436,6 +4593,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) goto error_handler; } + ops = &vf->vc_ops; + /* Perform basic checks on the msg */ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); if (err) { @@ -4463,75 +4622,75 @@ error_handler: switch (v_opcode) { case VIRTCHNL_OP_VERSION: - err = ice_vc_get_ver_msg(vf, msg); + err = ops->get_ver_msg(vf, msg); break; case VIRTCHNL_OP_GET_VF_RESOURCES: - err = ice_vc_get_vf_res_msg(vf, msg); + err = ops->get_vf_res_msg(vf, msg); if (ice_vf_init_vlan_stripping(vf)) dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n", vf->vf_id); ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: - ice_vc_reset_vf_msg(vf); + ops->reset_vf(vf); break; case VIRTCHNL_OP_ADD_ETH_ADDR: - err = ice_vc_add_mac_addr_msg(vf, msg); + err = ops->add_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_DEL_ETH_ADDR: - 
err = ice_vc_del_mac_addr_msg(vf, msg); + err = ops->del_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: - err = ice_vc_cfg_qs_msg(vf, msg); + err = ops->cfg_qs_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_QUEUES: - err = ice_vc_ena_qs_msg(vf, msg); + err = ops->ena_qs_msg(vf, msg); ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_DISABLE_QUEUES: - err = ice_vc_dis_qs_msg(vf, msg); + err = ops->dis_qs_msg(vf, msg); break; case VIRTCHNL_OP_REQUEST_QUEUES: - err = ice_vc_request_qs_msg(vf, msg); + err = ops->request_qs_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: - err = ice_vc_cfg_irq_map_msg(vf, msg); + err = ops->cfg_irq_map_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: - err = ice_vc_config_rss_key(vf, msg); + err = ops->config_rss_key(vf, msg); break; case VIRTCHNL_OP_CONFIG_RSS_LUT: - err = ice_vc_config_rss_lut(vf, msg); + err = ops->config_rss_lut(vf, msg); break; case VIRTCHNL_OP_GET_STATS: - err = ice_vc_get_stats_msg(vf, msg); + err = ops->get_stats_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: - err = ice_vc_cfg_promiscuous_mode_msg(vf, msg); + err = ops->cfg_promiscuous_mode_msg(vf, msg); break; case VIRTCHNL_OP_ADD_VLAN: - err = ice_vc_add_vlan_msg(vf, msg); + err = ops->add_vlan_msg(vf, msg); break; case VIRTCHNL_OP_DEL_VLAN: - err = ice_vc_remove_vlan_msg(vf, msg); + err = ops->remove_vlan_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: - err = ice_vc_ena_vlan_stripping(vf); + err = ops->ena_vlan_stripping(vf); break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: - err = ice_vc_dis_vlan_stripping(vf); + err = ops->dis_vlan_stripping(vf); break; case VIRTCHNL_OP_ADD_FDIR_FILTER: - err = ice_vc_add_fdir_fltr(vf, msg); + err = ops->add_fdir_fltr_msg(vf, msg); break; case VIRTCHNL_OP_DEL_FDIR_FILTER: - err = ice_vc_del_fdir_fltr(vf, msg); + err = ops->del_fdir_fltr_msg(vf, msg); break; case VIRTCHNL_OP_ADD_RSS_CFG: - err = ice_vc_handle_rss_cfg(vf, msg, true); + err = ops->handle_rss_cfg_msg(vf, msg, true); break; case VIRTCHNL_OP_DEL_RSS_CFG: - err = ice_vc_handle_rss_cfg(vf, msg, false); + err = ops->handle_rss_cfg_msg(vf, msg, false); break; case VIRTCHNL_OP_UNKNOWN: default: @@ -4640,6 +4799,11 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) struct ice_vf *vf; int ret; + if (ice_is_eswitch_mode_switchdev(pf)) { + dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); + return -EOPNOTSUPP; + } + if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 842cb077df86..3115284e5411 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -70,6 +70,32 @@ struct ice_mdd_vf_events { u16 last_printed; }; +struct ice_vf; + +struct ice_vc_vf_ops { + int (*get_ver_msg)(struct ice_vf *vf, u8 *msg); + int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg); + void (*reset_vf)(struct ice_vf *vf); + int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg); + int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*request_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg); + int (*config_rss_key)(struct ice_vf *vf, u8 *msg); + int (*config_rss_lut)(struct ice_vf *vf, u8 *msg); + int (*get_stats_msg)(struct ice_vf *vf, u8 *msg); + int 
(*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg); + int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg); + int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg); + int (*ena_vlan_stripping)(struct ice_vf *vf); + int (*dis_vlan_stripping)(struct ice_vf *vf); + int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add); + int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg); + int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg); +}; + /* VF information structure */ struct ice_vf { struct ice_pf *pf; @@ -111,9 +137,17 @@ struct ice_vf { struct ice_mdd_vf_events mdd_rx_events; struct ice_mdd_vf_events mdd_tx_events; DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); + + struct ice_repr *repr; + + struct ice_vc_vf_ops vc_ops; + + /* devlink port data */ + struct devlink_port devlink_port; }; #ifdef CONFIG_PCI_IOV +struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); void ice_process_vflr_event(struct ice_pf *pf); int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); @@ -124,6 +158,9 @@ void ice_free_vfs(struct ice_pf *pf); void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_vc_notify_link_state(struct ice_pf *pf); void ice_vc_notify_reset(struct ice_pf *pf); +void ice_vc_notify_vf_link_state(struct ice_vf *vf); +void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops); +void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops); bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); bool ice_reset_vf(struct ice_vf *vf, bool is_vflr); void ice_restore_all_vfs_msi_state(struct pci_dev *pdev); @@ -139,6 +176,10 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); +int ice_check_vf_ready_for_cfg(struct ice_vf *vf); + +bool ice_is_vf_disabled(struct ice_vf *vf); + int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); @@ -164,6 +205,9 @@ static inline void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { } static inline void ice_vc_notify_link_state(struct ice_pf *pf) { } static inline void ice_vc_notify_reset(struct ice_pf *pf) { } +static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { } +static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { } +static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { } static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { } static inline void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { } @@ -171,6 +215,21 @@ static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { } static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { } static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { } +static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf) +{ + return -EOPNOTSUPP; +} + +static inline bool ice_is_vf_disabled(struct ice_vf *vf) +{ + return true; +} + +static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) +{ + return NULL; +} + static inline bool ice_is_malicious_vf(struct ice_pf __always_unused *pf, struct ice_rq_event_info __always_unused *event, diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 5a9f61deeb38..7682eaa9a9ec 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -364,45 +364,39 
@@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) { union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; - struct ice_rx_buf *rx_buf; - bool ok = true; + struct xdp_buff **xdp; + u32 nb_buffs, i; dma_addr_t dma; - if (!count) - return true; - rx_desc = ICE_RX_DESC(rx_ring, ntu); - rx_buf = &rx_ring->rx_buf[ntu]; + xdp = &rx_ring->xdp_buf[ntu]; - do { - rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool); - if (!rx_buf->xdp) { - ok = false; - break; - } + nb_buffs = min_t(u16, count, rx_ring->count - ntu); + nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs); + if (!nb_buffs) + return false; - dma = xsk_buff_xdp_get_dma(rx_buf->xdp); + i = nb_buffs; + while (i--) { + dma = xsk_buff_xdp_get_dma(*xdp); rx_desc->read.pkt_addr = cpu_to_le64(dma); - rx_desc->wb.status_error0 = 0; rx_desc++; - rx_buf++; - ntu++; - - if (unlikely(ntu == rx_ring->count)) { - rx_desc = ICE_RX_DESC(rx_ring, 0); - rx_buf = rx_ring->rx_buf; - ntu = 0; - } - } while (--count); + xdp++; + } - if (rx_ring->next_to_use != ntu) { - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.status_error0 = 0; - ice_release_rx_desc(rx_ring, ntu); + ntu += nb_buffs; + if (ntu == rx_ring->count) { + rx_desc = ICE_RX_DESC(rx_ring, 0); + xdp = rx_ring->xdp_buf; + ntu = 0; } - return ok; + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.status_error0 = 0; + ice_release_rx_desc(rx_ring, ntu); + + return count == nb_buffs ? true : false; } /** @@ -421,19 +415,19 @@ static void ice_bump_ntc(struct ice_ring *rx_ring) /** * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer * @rx_ring: Rx ring - * @rx_buf: zero-copy Rx buffer + * @xdp_arr: Pointer to the SW ring of xdp_buff pointers * * This function allocates a new skb from a zero-copy Rx buffer. * * Returns the skb on success, NULL on failure. 
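/* Editor's illustrative sketch, not part of the patch: the reworked
 * ice_alloc_rx_bufs_zc() above only ever fills the contiguous span up to
 * the end of the ring in a single call and reports success only when the
 * whole request was satisfied; the wrap back to slot 0 is handled by a
 * later refill once next_to_use resets.  The helper below just restates
 * that span arithmetic. */
static u16 example_zc_refill_span(u16 count, u16 ring_count, u16 next_to_use)
{
	/* e.g. ring_count = 512, next_to_use = 500, count = 64 -> 12 */
	return min_t(u16, count, ring_count - next_to_use);
}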
*/ static struct sk_buff * -ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) +ice_construct_skb_zc(struct ice_ring *rx_ring, struct xdp_buff **xdp_arr) { - unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta; - unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data; - unsigned int datasize_hard = rx_buf->xdp->data_end - - rx_buf->xdp->data_hard_start; + struct xdp_buff *xdp = *xdp_arr; + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start; struct sk_buff *skb; skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard, @@ -441,13 +435,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) if (unlikely(!skb)) return NULL; - skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start); - memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize); + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); if (metasize) skb_metadata_set(skb, metasize); - xsk_buff_free(rx_buf->xdp); - rx_buf->xdp = NULL; + xsk_buff_free(xdp); + *xdp_arr = NULL; return skb; } @@ -521,7 +515,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) while (likely(total_rx_packets < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; unsigned int size, xdp_res = 0; - struct ice_rx_buf *rx_buf; + struct xdp_buff **xdp; struct sk_buff *skb; u16 stat_err_bits; u16 vlan_tag = 0; @@ -544,18 +538,18 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) if (!size) break; - rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; - rx_buf->xdp->data_end = rx_buf->xdp->data + size; - xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool); + xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean]; + xsk_buff_set_size(*xdp, size); + xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool); - xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp); + xdp_res = ice_run_xdp_zc(rx_ring, *xdp); if (xdp_res) { if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) xdp_xmit |= xdp_res; else - xsk_buff_free(rx_buf->xdp); + xsk_buff_free(*xdp); - rx_buf->xdp = NULL; + *xdp = NULL; total_rx_bytes += size; total_rx_packets++; cleaned_count++; @@ -565,7 +559,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) } /* XDP_PASS path */ - skb = ice_construct_skb_zc(rx_ring, rx_buf); + skb = ice_construct_skb_zc(rx_ring, xdp); if (!skb) { rx_ring->rx_stats.alloc_buf_failed++; break; @@ -813,12 +807,12 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) u16 i; for (i = 0; i < rx_ring->count; i++) { - struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; + struct xdp_buff **xdp = &rx_ring->xdp_buf[i]; - if (!rx_buf->xdp) + if (!xdp) continue; - rx_buf->xdp = NULL; + *xdp = NULL; } } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 751de06019a0..e67a71c3f141 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3356,7 +3356,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "NVM Read Error\n"); } - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + eth_hw_addr_set(netdev, hw->mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); @@ -4988,7 +4988,7 @@ static int igb_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; 
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); /* set the correct pool for the new PF MAC address in entry 0 */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index d32e72d953c8..74ccd622251a 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1527,8 +1527,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter) spin_unlock_bh(&hw->mbx_lock); if (is_valid_ether_addr(adapter->hw.mac.addr)) { - memcpy(netdev->dev_addr, adapter->hw.mac.addr, - netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); } @@ -1813,7 +1812,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) if (!ether_addr_equal(addr->sa_data, hw->mac.addr)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); return 0; } @@ -2816,8 +2815,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) else if (is_zero_ether_addr(adapter->hw.mac.addr)) dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); - memcpy(netdev->dev_addr, adapter->hw.mac.addr, - netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); } spin_unlock_bh(&hw->mbx_lock); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 0e19b4d02e62..7ffb1045f00c 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -949,7 +949,7 @@ static int igc_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); /* set the correct pool for the new PF MAC address in entry 0 */ @@ -6377,7 +6377,7 @@ static int igc_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "NVM Read Error\n"); } - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + eth_hw_addr_set(netdev, hw->mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c index a430871d1c27..c8d1e815ec6b 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c @@ -549,7 +549,7 @@ ixgb_mta_set(struct ixgb_hw *hw, *****************************************************************************/ void ixgb_rar_set(struct ixgb_hw *hw, - u8 *addr, + const u8 *addr, u32 index) { u32 rar_low, rar_high; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h index 6064583095da..70bcff5fb3db 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h @@ -740,7 +740,7 @@ bool ixgb_adapter_start(struct ixgb_hw *hw); void ixgb_check_for_link(struct ixgb_hw *hw); bool ixgb_check_for_bad_link(struct ixgb_hw *hw); -void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index); +void ixgb_rar_set(struct ixgb_hw *hw, const u8 *addr, u32 index); /* Filters (multicast, vlan, receive) */ void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list, diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c 
b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 1588376d4c67..5e1e2f0db82a 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1030,7 +1030,7 @@ ixgb_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); ixgb_rar_set(&adapter->hw, addr->sa_data, 0); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index a604552fa634..4a69823e6abd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -351,6 +351,7 @@ struct ixgbe_ring { }; u16 rx_offset; struct xdp_rxq_info xdp_rxq; + spinlock_t tx_lock; /* used in XDP mode */ struct xsk_buff_pool *xsk_pool; u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */ u16 rx_buf_len; @@ -375,11 +376,13 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_FCOE_INDICES 8 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define IXGBE_MAX_XDP_QS (IXGBE_MAX_FDIR_INDICES + 1) #define IXGBE_MAX_L2A_QUEUES 4 #define IXGBE_BAD_L2A_QUEUE 3 #define IXGBE_MAX_MACVLANS 63 +DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key); + struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ u16 indices; /* current value of indices */ @@ -629,7 +632,7 @@ struct ixgbe_adapter { /* XDP */ int num_xdp_queues; - struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES]; + struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS]; unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */ /* TX */ @@ -772,6 +775,22 @@ struct ixgbe_adapter { #endif /* CONFIG_IXGBE_IPSEC */ }; +static inline int ixgbe_determine_xdp_q_idx(int cpu) +{ + if (static_key_enabled(&ixgbe_xdp_locking_key)) + return cpu % IXGBE_MAX_XDP_QS; + else + return cpu; +} + +static inline +struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter) +{ + int index = ixgbe_determine_xdp_q_idx(smp_processor_id()); + + return adapter->xdp_ring[index]; +} + static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) { switch (adapter->hw.mac.type) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 0218f6c9b925..86b11164655e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -299,7 +299,10 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) { - return adapter->xdp_prog ? nr_cpu_ids : 0; + int queues; + + queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids); + return adapter->xdp_prog ? 
queues : 0; } #define IXGBE_RSS_64Q_MASK 0x3F @@ -947,6 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->count = adapter->tx_ring_count; ring->queue_index = xdp_idx; set_ring_xdp(ring); + spin_lock_init(&ring->tx_lock); /* assign ring to adapter */ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); @@ -1032,6 +1036,9 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) adapter->q_vector[v_idx] = NULL; __netif_napi_del(&q_vector->napi); + if (static_key_enabled(&ixgbe_xdp_locking_key)) + static_branch_dec(&ixgbe_xdp_locking_key); + /* * after a call to __netif_napi_del() napi may still be used and * ixgbe_get_stats64() might access the rings on this vector, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 13c4782b920a..0f9f022260d7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -165,6 +165,9 @@ MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL v2"); +DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key); +EXPORT_SYMBOL(ixgbe_xdp_locking_key); + static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); @@ -2197,6 +2200,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; + struct ixgbe_ring *ring; struct xdp_frame *xdpf; u32 act; @@ -2215,7 +2219,12 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) goto out_failure; - result = ixgbe_xmit_xdp_ring(adapter, xdpf); + ring = ixgbe_determine_xdp_ring(adapter); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + result = ixgbe_xmit_xdp_ring(ring, xdpf); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); if (result == IXGBE_XDP_CONSUMED) goto out_failure; break; @@ -2422,13 +2431,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, xdp_do_flush_map(); if (xdp_xmit & IXGBE_XDP_TX) { - struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. 
- */ - wmb(); - writel(ring->next_to_use, ring->tail); + ixgbe_xdp_ring_update_tail_locked(ring); } u64_stats_update_begin(&rx_ring->syncp); @@ -6320,7 +6325,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, if (ixgbe_init_rss_key(adapter)) return -ENOMEM; - adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL); + adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL); if (!adapter->af_xdp_zc_qps) return -ENOMEM; @@ -8536,10 +8541,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, } #endif -int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, +int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, struct xdp_frame *xdpf) { - struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; u32 len, cmd_type; @@ -8788,7 +8792,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); ixgbe_mac_set_default_filter(adapter); @@ -10131,8 +10135,13 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) return -EINVAL; } - if (nr_cpu_ids > MAX_XDP_QUEUES) + /* if the number of cpus is much larger than the maximum of queues, + * we should stop it and then return with ENOMEM like before. + */ + if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2) return -ENOMEM; + else if (nr_cpu_ids > IXGBE_MAX_XDP_QS) + static_branch_inc(&ixgbe_xdp_locking_key); old_prog = xchg(&adapter->xdp_prog, prog); need_reset = (!!prog != !!old_prog); @@ -10199,6 +10208,15 @@ void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) writel(ring->next_to_use, ring->tail); } +void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring) +{ + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + ixgbe_xdp_ring_update_tail(ring); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); +} + static int ixgbe_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { @@ -10216,18 +10234,21 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ - ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; + ring = adapter->xdp_prog ? 
ixgbe_determine_xdp_ring(adapter) : NULL; if (unlikely(!ring)) return -ENXIO; if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) return -ENXIO; + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; - err = ixgbe_xmit_xdp_ring(adapter, xdpf); + err = ixgbe_xmit_xdp_ring(ring, xdpf); if (err != IXGBE_XDP_TX) break; nxmit++; @@ -10236,6 +10257,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, if (unlikely(flags & XDP_XMIT_FLUSH)) ixgbe_xdp_ring_update_tail(ring); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + return nxmit; } @@ -10903,7 +10927,7 @@ skip_sriov: eth_platform_get_mac_address(&adapter->pdev->dev, adapter->hw.mac.perm_addr); - memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); + eth_hw_addr_set(netdev, hw->mac.perm_addr); if (!is_valid_ether_addr(netdev->dev_addr)) { e_dev_err("invalid MAC address\n"); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index 2aeec78029bc..a82533f21d36 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -12,7 +12,7 @@ #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ IXGBE_TXD_CMD_RS) -int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, +int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, struct xdp_frame *xdpf); bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, @@ -23,6 +23,7 @@ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb); void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring); +void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring); void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask); void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index b1d22e4d5ec9..db2bc58dfcfd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -100,6 +100,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; + struct ixgbe_ring *ring; struct xdp_frame *xdpf; u32 act; @@ -120,7 +121,12 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) goto out_failure; - result = ixgbe_xmit_xdp_ring(adapter, xdpf); + ring = ixgbe_determine_xdp_ring(adapter); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + result = ixgbe_xmit_xdp_ring(ring, xdpf); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); if (result == IXGBE_XDP_CONSUMED) goto out_failure; break; @@ -334,13 +340,9 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, xdp_do_flush_map(); if (xdp_xmit & IXGBE_XDP_TX) { - struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. 
- */ - wmb(); - writel(ring->next_to_use, ring->tail); + ixgbe_xdp_ring_update_tail_locked(ring); } u64_stats_update_begin(&rx_ring->syncp); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c714e1ecd308..d81811ab4ec4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2540,7 +2540,7 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) } if (is_valid_ether_addr(adapter->hw.mac.addr)) { - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } @@ -3054,7 +3054,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) else if (is_zero_ether_addr(adapter->hw.mac.addr)) dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); - ether_addr_copy(netdev->dev_addr, hw->mac.addr); + eth_hw_addr_set(netdev, hw->mac.addr); } if (!is_valid_ether_addr(netdev->dev_addr)) { @@ -4231,7 +4231,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) ether_addr_copy(hw->mac.addr, addr->sa_data); ether_addr_copy(hw->mac.perm_addr, addr->sa_data); - ether_addr_copy(netdev->dev_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data); return 0; } diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 1bdc4f23e1e5..439674fc9765 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -313,7 +313,7 @@ jme_load_macaddr(struct net_device *netdev) val = jread32(jme, JME_RXUMA_HI); macaddr[4] = (val >> 0) & 0xFF; macaddr[5] = (val >> 8) & 0xFF; - memcpy(netdev->dev_addr, macaddr, ETH_ALEN); + eth_hw_addr_set(netdev, macaddr); spin_unlock_bh(&jme->macaddr_lock); } @@ -2254,7 +2254,7 @@ jme_set_macaddr(struct net_device *netdev, void *p) return -EBUSY; spin_lock_bh(&jme->macaddr_lock); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); jme_set_unicastaddr(netdev); spin_unlock_bh(&jme->macaddr_lock); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 3e9f324f1061..df9a8eefa007 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1297,8 +1297,8 @@ static int korina_probe(struct platform_device *pdev) lp = netdev_priv(dev); if (mac_addr) - ether_addr_copy(dev->dev_addr, mac_addr); - else if (of_get_mac_address(pdev->dev.of_node, dev->dev_addr) < 0) + eth_hw_addr_set(dev, mac_addr); + else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0) eth_hw_addr_random(dev); clk = devm_clk_get_optional(&pdev->dev, "mdioclk"); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 62f8c5212182..2258e3f19161 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -96,6 +96,9 @@ struct ltq_etop_priv { struct ltq_etop_chan ch[MAX_DMA_CHAN]; int tx_free[MAX_DMA_CHAN >> 1]; + int tx_burst_len; + int rx_burst_len; + spinlock_t lock; }; @@ -259,7 +262,7 @@ ltq_etop_hw_init(struct net_device *dev) /* enable crc generation */ ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG); - ltq_dma_init_port(DMA_PORT_ETOP); + ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, rx_burst_len); for (i = 0; i < MAX_DMA_CHAN; i++) { int irq = LTQ_DMA_CH0_INT + i; @@ -472,8 +475,8 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - /* dma needs to start on a 16 byte aligned address */ - byte_offset = CPHYSADDR(skb->data) 
% 16; + /* dma needs to start on a burst length value aligned address */ + byte_offset = CPHYSADDR(skb->data) % (priv->tx_burst_len * 4); ch->skb[ch->dma.desc] = skb; netif_trans_update(dev); @@ -667,6 +670,18 @@ ltq_etop_probe(struct platform_device *pdev) spin_lock_init(&priv->lock); SET_NETDEV_DEV(dev, &pdev->dev); + err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len); + if (err < 0) { + dev_err(&pdev->dev, "unable to read tx-burst-length property\n"); + return err; + } + + err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len); + if (err < 0) { + dev_err(&pdev->dev, "unable to read rx-burst-length property\n"); + return err; + } + for (i = 0; i < MAX_DMA_CHAN; i++) { if (IS_TX(i)) netif_napi_add(dev, &priv->ch[i].napi, diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index fb78f17d734f..ecf1e11d9b91 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -14,13 +14,15 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/if_vlan.h> + #include <linux/of_net.h> #include <linux/of_platform.h> #include <xway_dma.h> /* DMA */ -#define XRX200_DMA_DATA_LEN 0x600 +#define XRX200_DMA_DATA_LEN (SZ_64K - 1) #define XRX200_DMA_RX 0 #define XRX200_DMA_TX 1 @@ -71,6 +73,9 @@ struct xrx200_priv { struct net_device *net_dev; struct device *dev; + int tx_burst_len; + int rx_burst_len; + __iomem void *pmac_reg; }; @@ -106,7 +111,8 @@ static void xrx200_flush_dma(struct xrx200_chan *ch) break; desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | - XRX200_DMA_DATA_LEN; + (ch->priv->net_dev->mtu + VLAN_ETH_HLEN + + ETH_FCS_LEN); ch->dma.desc++; ch->dma.desc %= LTQ_DESC_NUM; } @@ -154,19 +160,20 @@ static int xrx200_close(struct net_device *net_dev) static int xrx200_alloc_skb(struct xrx200_chan *ch) { + int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; struct sk_buff *skb = ch->skb[ch->dma.desc]; dma_addr_t mapping; int ret = 0; ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev, - XRX200_DMA_DATA_LEN); + len); if (!ch->skb[ch->dma.desc]) { ret = -ENOMEM; goto skip; } mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data, - XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE); + len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) { dev_kfree_skb_any(ch->skb[ch->dma.desc]); ch->skb[ch->dma.desc] = skb; @@ -179,8 +186,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch) wmb(); skip: ch->dma.desc_base[ch->dma.desc].ctl = - LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | - XRX200_DMA_DATA_LEN; + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len; return ret; } @@ -316,8 +322,8 @@ static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb, if (unlikely(dma_mapping_error(priv->dev, mapping))) goto err_drop; - /* dma needs to start on a 16 byte aligned address */ - byte_offset = mapping % 16; + /* dma needs to start on a burst length value aligned address */ + byte_offset = mapping % (priv->tx_burst_len * 4); desc->addr = mapping - byte_offset; /* Make sure the address is written before we give it to HW */ @@ -340,10 +346,57 @@ err_drop: return NETDEV_TX_OK; } +static int +xrx200_change_mtu(struct net_device *net_dev, int new_mtu) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + struct xrx200_chan *ch_rx = &priv->chan_rx; + int old_mtu = net_dev->mtu; + bool running = false; + struct sk_buff *skb; + int curr_desc; + int ret = 0; + + net_dev->mtu = new_mtu; + + if (new_mtu <= 
old_mtu) + return ret; + + running = netif_running(net_dev); + if (running) { + napi_disable(&ch_rx->napi); + ltq_dma_close(&ch_rx->dma); + } + + xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM); + curr_desc = ch_rx->dma.desc; + + for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM; + ch_rx->dma.desc++) { + skb = ch_rx->skb[ch_rx->dma.desc]; + ret = xrx200_alloc_skb(ch_rx); + if (ret) { + net_dev->mtu = old_mtu; + break; + } + dev_kfree_skb_any(skb); + } + + ch_rx->dma.desc = curr_desc; + if (running) { + napi_enable(&ch_rx->napi); + ltq_dma_open(&ch_rx->dma); + ltq_dma_enable_irq(&ch_rx->dma); + } + + return ret; +} + static const struct net_device_ops xrx200_netdev_ops = { .ndo_open = xrx200_open, .ndo_stop = xrx200_close, .ndo_start_xmit = xrx200_start_xmit, + .ndo_change_mtu = xrx200_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -369,7 +422,7 @@ static int xrx200_dma_init(struct xrx200_priv *priv) int ret = 0; int i; - ltq_dma_init_port(DMA_PORT_ETOP); + ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, rx_burst_len); ch_rx->dma.nr = XRX200_DMA_RX; ch_rx->dma.dev = priv->dev; @@ -453,7 +506,7 @@ static int xrx200_probe(struct platform_device *pdev) net_dev->netdev_ops = &xrx200_netdev_ops; SET_NETDEV_DEV(net_dev, dev); net_dev->min_mtu = ETH_ZLEN; - net_dev->max_mtu = XRX200_DMA_DATA_LEN; + net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN; /* load the memory ranges */ priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); @@ -474,10 +527,22 @@ static int xrx200_probe(struct platform_device *pdev) return PTR_ERR(priv->clk); } - err = of_get_mac_address(np, net_dev->dev_addr); + err = of_get_ethdev_address(np, net_dev); if (err) eth_hw_addr_random(net_dev); + err = device_property_read_u32(dev, "lantiq,tx-burst-length", &priv->tx_burst_len); + if (err < 0) { + dev_err(dev, "unable to read tx-burst-length property\n"); + return err; + } + + err = device_property_read_u32(dev, "lantiq,rx-burst-length", &priv->rx_burst_len); + if (err < 0) { + dev_err(dev, "unable to read rx-burst-length property\n"); + return err; + } + /* bring up the dma engine and IP core */ err = xrx200_dma_init(priv); if (err) diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig index 63bf01d28f0c..f99adbf26ab4 100644 --- a/drivers/net/ethernet/litex/Kconfig +++ b/drivers/net/ethernet/litex/Kconfig @@ -17,7 +17,7 @@ if NET_VENDOR_LITEX config LITEX_LITEETH tristate "LiteX Ethernet support" - depends on OF_NET + depends on OF help If you wish to compile a kernel for hardware with a LiteX LiteEth device then you should answer Y to this. 
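The lantiq hunks above replace the fixed 16-byte DMA start alignment with one derived from the TX burst length read from the device tree; the factor of 4 suggests the burst length is counted in 32-bit words. A minimal sketch of that alignment step, using a hypothetical helper name and assuming only the tx_burst_len field added above:

#include <linux/types.h>

/* Hypothetical helper mirroring ltq_etop_tx()/xrx200_start_xmit(): DMA must
 * start on an address aligned to tx_burst_len 32-bit words, so the descriptor
 * gets the aligned base address while the remaining byte offset is carried in
 * the descriptor control word.
 */
static inline dma_addr_t dma_burst_align(dma_addr_t mapping, int tx_burst_len,
					 unsigned int *byte_offset)
{
	*byte_offset = mapping % (tx_burst_len * 4);
	return mapping - *byte_offset;
}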
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c index a9bdbf0dcfe1..3d9385a4989b 100644 --- a/drivers/net/ethernet/litex/litex_liteeth.c +++ b/drivers/net/ethernet/litex/litex_liteeth.c @@ -266,7 +266,7 @@ static int liteeth_probe(struct platform_device *pdev) priv->tx_base = buf_base + priv->num_rx_slots * priv->slot_size; priv->tx_slot = 0; - err = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + err = of_get_ethdev_address(pdev->dev.of_node, netdev); if (err) eth_hw_addr_random(netdev); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 28d5ad296646..a63d9a5c8059 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1770,7 +1770,7 @@ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) addr[5] = mac_l & 0xff; } -static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) +static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr) { wrlp(mp, MAC_ADDR_HIGH, (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); @@ -1919,7 +1919,7 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, sa->sa_data); netif_addr_lock_bh(dev); mv643xx_eth_program_unicast_filter(dev); @@ -2926,7 +2926,7 @@ static void set_params(struct mv643xx_eth_private *mp, unsigned int tx_ring_size; if (is_valid_ether_addr(pd->mac_addr)) - memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, pd->mac_addr); else uc_addr_get(mp, dev->dev_addr); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 9d460a270601..e2ce84ecb7a6 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1623,8 +1623,8 @@ static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, } /* Set mac address */ -static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, - int queue) +static void mvneta_mac_addr_set(struct mvneta_port *pp, + const unsigned char *addr, int queue) { unsigned int mac_h; unsigned int mac_l; @@ -5242,14 +5242,14 @@ static int mvneta_probe(struct platform_device *pdev) goto err_free_ports; } - err = of_get_mac_address(dn, dev->dev_addr); + err = of_get_ethdev_address(dn, dev); if (!err) { mac_from = "device tree"; } else { mvneta_get_mac_addr(pp, hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { mac_from = "hardware"; - memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, hw_mac_addr); } else { mac_from = "random"; eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index d5c92e43f89e..ad3be55cce68 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -6081,9 +6081,9 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, char hw_mac_addr[ETH_ALEN] = {0}; char fw_mac_addr[ETH_ALEN]; - if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { + if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) { *mac_from = "firmware node"; - ether_addr_copy(dev->dev_addr, fw_mac_addr); + eth_hw_addr_set(dev, fw_mac_addr); return; } @@ -6091,7 +6091,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, 
mvpp21_get_mac_address(port, hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { *mac_from = "hardware"; - ether_addr_copy(dev->dev_addr, hw_mac_addr); + eth_hw_addr_set(dev, hw_mac_addr); return; } } @@ -6301,12 +6301,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config, case PHY_INTERFACE_MODE_XAUI: case PHY_INTERFACE_MODE_NA: if (mvpp2_port_supports_xlg(port)) { - phylink_set(mask, 10000baseT_Full); - phylink_set(mask, 10000baseCR_Full); - phylink_set(mask, 10000baseSR_Full); - phylink_set(mask, 10000baseLR_Full); - phylink_set(mask, 10000baseLRM_Full); - phylink_set(mask, 10000baseER_Full); + phylink_set_10g_modes(mask); phylink_set(mask, 10000baseKR_Full); } if (state->interface != PHY_INTERFACE_MODE_NA) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 93575800ca92..75ba57bd1d46 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -2347,7 +2347,7 @@ int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) return err; /* Set addr in the device */ - ether_addr_copy(dev->dev_addr, da); + eth_hw_addr_set(dev, da); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 34a089b71e55..186d00a9ab35 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -838,9 +838,6 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable) if (!cgx) return; - if (is_dev_rpm(cgx)) - return; - if (enable) { /* Enable inbound PTP timestamping */ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); @@ -1522,7 +1519,6 @@ static int cgx_lmac_exit(struct cgx *cgx) int i; if (cgx->cgx_cmd_workq) { - flush_workqueue(cgx->cgx_cmd_workq); destroy_workqueue(cgx->cgx_cmd_workq); cgx->cgx_cmd_workq = NULL; } @@ -1545,9 +1541,11 @@ static int cgx_lmac_exit(struct cgx *cgx) static void cgx_populate_features(struct cgx *cgx) { if (is_dev_rpm(cgx)) - cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC); + cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM | + RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); else - cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); + cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 | + RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF); } static struct mac_ops cgx_mac_ops = { @@ -1571,6 +1569,7 @@ static struct mac_ops cgx_mac_ops = { .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status, .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm, .mac_pause_frm_config = cgx_lmac_pause_frm_config, + .mac_enadis_ptp_config = cgx_lmac_ptp_config, }; static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index d9bea13f15b8..8931864ee110 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -191,6 +191,7 @@ enum nix_scheduler { #define NIX_CHAN_SDP_CH_START (0x700ull) #define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a)) #define NIX_CHAN_SDP_NUM_CHANS 256 +#define NIX_CHAN_CPT_CH_START (0x800ull) /* The mask is to extract lower 10-bits of channel number * which CPT will pass to X2P. 
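Several probe paths in the hunks above (korina, litex_liteeth, mvneta) now share the same address-sourcing order once dev_addr is written only through helpers: take the address from the firmware/device-tree node, fall back to whatever the hardware registers report, and otherwise generate a random one. A condensed sketch of that pattern with a hypothetical function name; only helpers already used in the diff are assumed:

#include <linux/etherdevice.h>
#include <linux/of_net.h>

/* Hypothetical condensation of the probe-time MAC selection seen above;
 * every assignment goes through eth_hw_addr_set()/eth_hw_addr_random()
 * because dev->dev_addr may no longer be written directly.
 */
static void example_assign_mac(struct net_device *ndev, struct device_node *np,
			       const u8 *hw_mac)
{
	if (!of_get_ethdev_address(np, ndev))
		return;				/* device tree / firmware node */

	if (is_valid_ether_addr(hw_mac))
		eth_hw_addr_set(ndev, hw_mac);	/* hardware registers */
	else
		eth_hw_addr_random(ndev);	/* last resort */
}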
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index c38306b3384a..fc6e7423cbd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -102,6 +102,11 @@ struct mac_ops { void (*mac_pause_frm_config)(void *cgxd, int lmac_id, bool enable); + + /* Enable/Disable Inbound PTP */ + void (*mac_enadis_ptp_config)(void *cgxd, + int lmac_id, + bool enable); }; struct cgx { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 154877706a0e..dfe487235007 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -84,7 +84,7 @@ struct mbox_msghdr { #define OTX2_MBOX_REQ_SIG (0xdead) #define OTX2_MBOX_RSP_SIG (0xbeef) u16 sig; /* Signature, for validating corrupted msgs */ -#define OTX2_MBOX_VERSION (0x0009) +#define OTX2_MBOX_VERSION (0x000a) u16 ver; /* Version of msg's structure for this ID */ u16 next_msgoff; /* Offset of next msg within mailbox region */ int rc; /* Msg process'ed response code */ @@ -154,23 +154,23 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \ M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \ M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \ cgx_pause_frm_cfg) \ -M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \ -M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ -M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ -M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \ -M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ - cgx_set_link_mode_rsp) \ -M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ - cgx_features_info_msg) \ -M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ -M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \ - cgx_mac_addr_add_rsp) \ -M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \ +M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \ +M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \ +M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \ + cgx_mac_addr_add_rsp) \ +M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \ msg_rsp) \ -M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \ +M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \ cgx_max_dmac_entries_get_rsp) \ -M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \ -M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \ +M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ +M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\ + cgx_set_link_mode_rsp) \ +M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ +M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \ + cgx_features_info_msg) \ +M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \ +M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \ +M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \ msg_rsp) \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ @@ -186,6 +186,8 @@ M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \ M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \ M(CPT_RD_WR_REGISTER, 
0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \ cpt_rd_wr_reg_msg) \ +M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \ + cpt_inline_ipsec_cfg_msg, msg_rsp) \ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \ M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \ msg_rsp) \ @@ -229,6 +231,8 @@ M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \ npc_mcam_read_entry_req, \ npc_mcam_read_entry_rsp) \ +M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \ + npc_set_pkind, msg_rsp) \ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \ msg_req, npc_mcam_read_base_rule_rsp) \ M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \ @@ -270,6 +274,10 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \ nix_bp_cfg_rsp) \ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \ M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \ +M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \ + nix_inline_ipsec_cfg, msg_rsp) \ +M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \ + nix_inline_ipsec_lf_cfg, msg_rsp) \ M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ nix_cn10k_aq_enq_rsp) \ M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \ @@ -575,10 +583,13 @@ struct cgx_mac_addr_update_req { }; #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ -#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ -#define RVU_MAC_VERSION BIT_ULL(2) -#define RVU_MAC_CGX BIT_ULL(3) -#define RVU_MAC_RPM BIT_ULL(4) +#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1) + /* flow control from physical link higig2 messages */ +#define RVU_LMAC_FEAT_PTP BIT_ULL(2) /* precison time protocol */ +#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */ +#define RVU_MAC_VERSION BIT_ULL(4) +#define RVU_MAC_CGX BIT_ULL(5) +#define RVU_MAC_RPM BIT_ULL(6) struct cgx_features_info_msg { struct mbox_msghdr hdr; @@ -593,6 +604,22 @@ struct rpm_stats_rsp { u64 tx_stats[RPM_TX_STATS_COUNT]; }; +struct npc_set_pkind { + struct mbox_msghdr hdr; +#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0) +#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63) + u64 mode; +#define PKIND_TX BIT_ULL(0) +#define PKIND_RX BIT_ULL(1) + u8 dir; + u8 pkind; /* valid only in case custom flag */ + u8 var_len_off; /* Offset of custom header length field. 
+ * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND + */ + u8 var_len_off_mask; /* Mask for length with in offset */ + u8 shift_dir; /* shift direction to get length of the header at var_len_off */ +}; + /* NPA mbox message formats */ /* NPA mailbox error codes @@ -698,6 +725,8 @@ enum nix_af_status { NIX_AF_ERR_INVALID_BANDPROF = -426, NIX_AF_ERR_IPOLICER_NOTSUPP = -427, NIX_AF_ERR_BANDPROF_INVAL_REQ = -428, + NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429, + NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430, }; /* For NIX RX vtag action */ @@ -1065,6 +1094,40 @@ struct nix_bp_cfg_rsp { u8 chan_cnt; /* Number of channel for which bpids are assigned */ }; +/* Global NIX inline IPSec configuration */ +struct nix_inline_ipsec_cfg { + struct mbox_msghdr hdr; + u32 cpt_credit; + struct { + u8 egrp; + u8 opcode; + u16 param1; + u16 param2; + } gen_cfg; + struct { + u16 cpt_pf_func; + u8 cpt_slot; + } inst_qsel; + u8 enable; +}; + +/* Per NIX LF inline IPSec configuration */ +struct nix_inline_ipsec_lf_cfg { + struct mbox_msghdr hdr; + u64 sa_base_addr; + struct { + u32 tag_const; + u16 lenm1_max; + u8 sa_pow2_size; + u8 tt; + } ipsec_cfg0; + struct { + u32 sa_idx_max; + u8 sa_idx_w; + } ipsec_cfg1; + u8 enable; +}; + struct nix_hw_info { struct mbox_msghdr hdr; u16 rsvs16; @@ -1357,12 +1420,15 @@ struct npc_mcam_get_stats_rsp { enum ptp_op { PTP_OP_ADJFINE = 0, PTP_OP_GET_CLOCK = 1, + PTP_OP_GET_TSTMP = 2, + PTP_OP_SET_THRESH = 3, }; struct ptp_req { struct mbox_msghdr hdr; u8 op; s64 scaled_ppm; + u64 thresh; }; struct ptp_rsp { @@ -1399,7 +1465,9 @@ enum cpt_af_status { CPT_AF_ERR_LF_INVALID = -903, CPT_AF_ERR_ACCESS_DENIED = -904, CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905, - CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906 + CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906, + CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907, + CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908 }; /* CPT mbox message formats */ @@ -1420,6 +1488,22 @@ struct cpt_lf_alloc_req_msg { int blkaddr; }; +#define CPT_INLINE_INBOUND 0 +#define CPT_INLINE_OUTBOUND 1 + +/* Mailbox message request format for CPT IPsec + * inline inbound and outbound configuration. + */ +struct cpt_inline_ipsec_cfg_msg { + struct mbox_msghdr hdr; + u8 enable; + u8 slot; + u8 dir; + u8 sso_pf_func_ovrd; + u16 sso_pf_func; /* inbound path SSO_PF_FUNC */ + u16 nix_pf_func; /* outbound path NIX_PF_FUNC */ +}; + /* Mailbox message request and response format for CPT stats. */ struct cpt_sts_req { struct mbox_msghdr hdr; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 3a819b24accc..6e1192f52608 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -31,9 +31,9 @@ enum npc_kpu_la_ltype { NPC_LT_LA_HIGIG2_ETHER, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_LT_LA_CH_LEN_90B_ETHER, NPC_LT_LA_CPT_HDR, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_LT_LA_CUSTOM_PRE_L2_ETHER, NPC_LT_LA_CUSTOM0 = 0xE, NPC_LT_LA_CUSTOM1 = 0xF, }; @@ -148,10 +148,11 @@ enum npc_kpu_lh_ltype { * Software assigns pkind for each incoming port such as CGX * Ethernet interfaces, LBK interfaces, etc. 
*/ -#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND +#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND enum npc_pkind_type { NPC_RX_LBK_PKIND = 0ULL, + NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL, NPC_RX_VLAN_EXDSA_PKIND = 56ULL, NPC_RX_CHLEN24B_PKIND = 57ULL, NPC_RX_CPT_HDR_PKIND, @@ -162,6 +163,10 @@ enum npc_pkind_type { NPC_TX_DEF_PKIND, /* NIX-TX PKIND */ }; +enum npc_interface_type { + NPC_INTF_MODE_DEF, +}; + /* list of known and supported fields in packet header and * fields present in key structure. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index 588822a0cf21..1a8c5376297c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -176,9 +176,8 @@ enum npc_kpu_parser_state { NPC_S_KPU1_EXDSA, NPC_S_KPU1_HIGIG2, NPC_S_KPU1_IH_NIX_HIGIG2, - NPC_S_KPU1_CUSTOM_L2_90B, + NPC_S_KPU1_CUSTOM_PRE_L2, NPC_S_KPU1_CPT_HDR, - NPC_S_KPU1_CUSTOM_L2_24B, NPC_S_KPU1_VLAN_EXDSA, NPC_S_KPU2_CTAG, NPC_S_KPU2_CTAG2, @@ -188,6 +187,8 @@ enum npc_kpu_parser_state { NPC_S_KPU2_PREHEADER, NPC_S_KPU2_EXDSA, NPC_S_KPU2_NGIO, + NPC_S_KPU2_CPT_CTAG, + NPC_S_KPU2_CPT_QINQ, NPC_S_KPU3_CTAG, NPC_S_KPU3_STAG, NPC_S_KPU3_QINQ, @@ -979,8 +980,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, 0, 0, - NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, + NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER, 0, 0, 0, 0, 0, @@ -996,27 +997,27 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 36, 40, 44, 0, 0, - NPC_S_KPU1_CUSTOM_L2_24B, 0, 0, - NPC_LID_LA, NPC_LT_NA, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 40, 54, 58, 0, 0, - NPC_S_KPU1_CPT_HDR, 0, 0, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CPT_HDR, 40, 0, NPC_LID_LA, NPC_LT_NA, 0, - 0, 0, 0, 0, + 7, 7, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 102, 106, 110, 0, 0, - NPC_S_KPU1_CUSTOM_L2_90B, 0, 0, - NPC_LID_LA, NPC_LT_NA, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, 0, 0, 0, 0, 0, @@ -1711,7 +1712,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_IP, 0xffff, 0x0000, @@ -1720,7 +1721,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_IP6, 0xffff, 0x0000, @@ -1729,7 +1730,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_ARP, 0xffff, 0x0000, @@ -1738,7 +1739,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_RARP, 0xffff, 0x0000, @@ -1747,7 +1748,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_PTP, 0xffff, 0x0000, @@ -1756,7 +1757,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_FCOE, 0xffff, 0x0000, @@ -1765,7 +1766,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - 
NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_CTAG, @@ -1774,7 +1775,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_CTAG, 0xffff, 0x0000, @@ -1783,7 +1784,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_SBTAG, 0xffff, 0x0000, @@ -1792,7 +1793,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_QINQ, 0xffff, 0x0000, @@ -1801,7 +1802,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_ETAG, 0xffff, 0x0000, @@ -1810,7 +1811,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_MPLSU, 0xffff, 0x0000, @@ -1819,7 +1820,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_MPLSM, 0xffff, 0x0000, @@ -1828,7 +1829,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_NSH, 0xffff, 0x0000, @@ -1837,7 +1838,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, 0x0000, 0x0000, 0x0000, @@ -1847,168 +1848,33 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, 0x0000, 0x0000, }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_QINQ, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, - 0x0000, - NPC_ETYPE_IP, - 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, - 0x0000, NPC_ETYPE_IP6, 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, 0x0000, - 0xffff, 0x0000, 0x0000, - NPC_ETYPE_CTAG, - 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, 0x0000, - NPC_ETYPE_QINQ, - 0xffff, }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_RARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_PTP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_FCOE, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - 
NPC_ETYPE_SBTAG, - 0xffff, 0x0000, 0x0000, 0x0000, 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_S_KPU1_CPT_HDR, 0xff, NPC_ETYPE_QINQ, 0xffff, 0x0000, @@ -2017,51 +1883,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_ETAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_MPLSU, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_MPLSM, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_NSH, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { NPC_S_KPU1_VLAN_EXDSA, 0xff, NPC_ETYPE_CTAG, 0xffff, @@ -3066,6 +2887,42 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = { 0x0000, }, { + NPC_S_KPU2_CPT_CTAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_CTAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -7496,15 +7353,6 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = { NPC_S_KPU9_GTPU, 0xff, 0x0000, 0x0000, - NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU, - NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU9_GTPU, 0xff, - 0x0000, - 0x0000, NPC_GTP_PT_GTP | NPC_GTP_VER1, NPC_GTP_PT_MASK | NPC_GTP_VER_MASK, 0x0000, @@ -9192,159 +9040,127 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 3, 0, - NPC_S_KPU5_IP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_IP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, 3, 0, - NPC_S_KPU5_IP6, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_IP6, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_ARP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_ARP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_RARP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_RARP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_PTP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_PTP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_FCOE, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_FCOE, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 12, 0, 0, 0, - NPC_S_KPU2_CTAG2, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_CTAG2, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_CTAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 22, 0, 0, - NPC_S_KPU2_SBTAG, 102, 1, - NPC_LID_LA, 
NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_SBTAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_QINQ, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 12, 26, 0, 0, - NPC_S_KPU2_ETAG, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, + NPC_S_KPU2_ETAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_MPLS, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_MPLS, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, 2, 0, - NPC_S_KPU4_NSH, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_NSH, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 3, 0, - NPC_S_KPU5_CPT_IP, 56, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_S_KPU4_NSH, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, - NPC_S_KPU5_CPT_IP6, 56, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 54, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 54, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 3, 0, - NPC_S_KPU5_CPT_IP, 60, 1, + NPC_S_KPU5_CPT_IP, 14, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, 0, 0, 0, 0, 0, @@ -9352,7 +9168,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, 3, 0, - NPC_S_KPU5_CPT_IP6, 60, 1, + NPC_S_KPU5_CPT_IP6, 14, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, 0, 0, 0, 0, 0, @@ -9360,7 +9176,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 58, 1, + NPC_S_KPU2_CPT_CTAG, 12, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, 0, 0, 0, 0, @@ -9368,141 +9184,13 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 58, 1, + NPC_S_KPU2_CPT_QINQ, 12, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 3, 0, - NPC_S_KPU5_IP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, - NPC_S_KPU5_IP6, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_ARP, 38, 1, - NPC_LID_LA, 
NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_RARP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_PTP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_FCOE, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 12, 0, 0, 0, - NPC_S_KPU2_CTAG2, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 22, 0, 0, - NPC_S_KPU2_SBTAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 12, 26, 0, 0, - NPC_S_KPU2_ETAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_MPLS, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_MPLS, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 0, 0, 2, 0, - NPC_S_KPU4_NSH, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_NSH, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 0, 0, 1, 0, NPC_S_KPU3_VLAN_EXDSA, 12, 1, NPC_LID_LA, NPC_LT_LA_ETHER, @@ -10395,6 +10083,38 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_CPT_IP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_CPT_IP6, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_CPT_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_CPT_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -14335,16 +14055,8 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - NPC_S_KPU12_TU_IP, 8, 1, - NPC_LID_LE, NPC_LT_LE_GTPU, - NPC_F_LE_L_GTPU_G_PDU, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - NPC_S_KPU12_TU_IP, 8, 1, + 8, 0, 6, 2, 1, + NPC_S_NA, 0, 1, NPC_LID_LE, NPC_LT_LE_GTPU, 0, 0, 0, 0, 0, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index 9b8e59f4c206..d6321de3cc17 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -27,54 +27,29 @@ #define PCI_DEVID_CN10K_PTP 0xA09E #define PCI_PTP_BAR_NO 0 -#define PCI_RST_BAR_NO 0 #define PTP_CLOCK_CFG 0xF00ULL #define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0) +#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1) +#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2) +#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9) +#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8) +#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10) +#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30) +#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31) + +#define PTP_PPS_HI_INCR 0xF60ULL +#define PTP_PPS_LO_INCR 0xF68ULL +#define PTP_PPS_THRESH_HI 0xF58ULL + #define PTP_CLOCK_LO 0xF08ULL #define PTP_CLOCK_HI 0xF10ULL #define PTP_CLOCK_COMP 0xF18ULL - -#define RST_BOOT 0x1600ULL -#define RST_MUL_BITS GENMASK_ULL(38, 33) -#define CLOCK_BASE_RATE 50000000ULL +#define PTP_TIMESTAMP 0xF20ULL static struct ptp *first_ptp_block; static const struct pci_device_id ptp_id_table[]; -static u64 get_clock_rate(void) -{ - u64 cfg, ret = CLOCK_BASE_RATE * 16; - struct pci_dev *pdev; - void __iomem *base; - - /* To get the input clock frequency with which PTP co-processor - * block is running the base frequency(50 MHz) needs to be multiplied - * with multiplier bits present in RST_BOOT register of RESET block. - * Hence below code gets the multiplier bits from the RESET PCI - * device present in the system. - */ - pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, - PCI_DEVID_OCTEONTX2_RST, NULL); - if (!pdev) - goto error; - - base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO); - if (!base) - goto error_put_pdev; - - cfg = readq(base + RST_BOOT); - ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg); - - iounmap(base); - -error_put_pdev: - pci_dev_put(pdev); - -error: - return ret; -} - struct ptp *ptp_get(void) { struct ptp *ptp = first_ptp_block; @@ -145,13 +120,74 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk) return 0; } +void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts) +{ + struct pci_dev *pdev; + u64 clock_comp; + u64 clock_cfg; + + if (!ptp) + return; + + pdev = ptp->pdev; + + if (!sclk) { + dev_err(&pdev->dev, "PTP input clock cannot be zero\n"); + return; + } + + /* sclk is in MHz */ + ptp->clock_rate = sclk * 1000000; + + /* Enable PTP clock */ + clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); + + if (ext_clk_freq) { + ptp->clock_rate = ext_clk_freq; + /* Set GPIO as PTP clock source */ + clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK; + clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN; + } + + if (extts) { + clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE; + /* Set GPIO as timestamping source */ + clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK; + clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN; + } + + clock_cfg |= PTP_CLOCK_CFG_PTP_EN; + clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV; + writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); + + /* Set 50% duty cycle for 1Hz output */ + writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR); + writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR); + + clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate; + /* Initial compensation value to start the nanosecs counter */ + writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); +} + +static int ptp_get_tstmp(struct ptp *ptp, u64 *clk) +{ + *clk = readq(ptp->reg_base + PTP_TIMESTAMP); + + return 0; +} + +static int ptp_set_thresh(struct ptp *ptp, u64 thresh) +{ + writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI); + + return 0; +} + static int 
ptp_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct ptp *ptp; - u64 clock_comp; - u64 clock_cfg; int err; ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL); @@ -172,17 +208,6 @@ static int ptp_probe(struct pci_dev *pdev, ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO]; - ptp->clock_rate = get_clock_rate(); - - /* Enable PTP clock */ - clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); - clock_cfg |= PTP_CLOCK_CFG_PTP_EN; - writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); - - clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate; - /* Initial compensation value to start the nanosecs counter */ - writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); - pci_set_drvdata(pdev, ptp); if (!first_ptp_block) first_ptp_block = ptp; @@ -272,6 +297,12 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req, case PTP_OP_GET_CLOCK: err = ptp_get_clock(rvu->ptp, &rsp->clk); break; + case PTP_OP_GET_TSTMP: + err = ptp_get_tstmp(rvu->ptp, &rsp->clk); + break; + case PTP_OP_SET_THRESH: + err = ptp_set_thresh(rvu->ptp, req->thresh); + break; default: err = -EINVAL; break; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h index 76d404b24552..1b81a0493cd3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h @@ -20,6 +20,7 @@ struct ptp { struct ptp *ptp_get(void); void ptp_put(struct ptp *ptp); +void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts); extern struct pci_driver ptp_driver; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index 07b0eafccad8..e695fa0e82a9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -29,6 +29,7 @@ static struct mac_ops rpm_mac_ops = { .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status, .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm, .mac_pause_frm_config = rpm_lmac_pause_frm_config, + .mac_enadis_ptp_config = rpm_lmac_ptp_config, }; struct mac_ops *rpm_get_mac_ops(void) @@ -270,3 +271,19 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable) return 0; } + +void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return; + + cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG); + if (enable) + cfg |= RPMX_RX_TS_PREPEND; + else + cfg &= ~RPMX_RX_TS_PREPEND; + rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index f0b069442dcc..57c8a687b488 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -14,6 +14,8 @@ #define PCI_DEVID_CN10K_RPM 0xA060 /* Registers */ +#define RPMX_CMRX_CFG 0x00 +#define RPMX_RX_TS_PREPEND BIT_ULL(22) #define RPMX_CMRX_SW_INT 0x180 #define RPMX_CMRX_SW_INT_W1S 0x188 #define RPMX_CMRX_SW_INT_ENA_W1S 0x198 @@ -54,4 +56,5 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause); int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat); int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat); +void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable); #endif /* RPM_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 
35836903b7fb..0a1e9f6e216a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1287,6 +1287,60 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, return (val & 0xFFF); } +int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, + u16 global_slot, u16 *slot_in_block) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int numlfs, total_lfs = 0, nr_blocks = 0; + int i, num_blkaddr[BLK_COUNT] = { 0 }; + struct rvu_block *block; + int blkaddr; + u16 start_slot; + + if (!is_blktype_attached(pfvf, blktype)) + return -ENODEV; + + /* Get all the block addresses from which LFs are attached to + * the given pcifunc in num_blkaddr[]. + */ + for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) { + block = &rvu->hw->block[blkaddr]; + if (block->type != blktype) + continue; + if (!is_block_implemented(rvu->hw, blkaddr)) + continue; + + numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr); + if (numlfs) { + total_lfs += numlfs; + num_blkaddr[nr_blocks] = blkaddr; + nr_blocks++; + } + } + + if (global_slot >= total_lfs) + return -ENODEV; + + /* Based on the given global slot number retrieve the + * correct block address out of all attached block + * addresses and slot number in that block. + */ + total_lfs = 0; + blkaddr = -ENODEV; + for (i = 0; i < nr_blocks; i++) { + numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]); + total_lfs += numlfs; + if (global_slot < total_lfs) { + blkaddr = num_blkaddr[i]; + start_slot = total_lfs - numlfs; + *slot_in_block = global_slot - start_slot; + break; + } + } + + return blkaddr; +} + static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -2345,7 +2399,6 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw) int devid; if (mw->mbox_wq) { - flush_workqueue(mw->mbox_wq); destroy_workqueue(mw->mbox_wq); mw->mbox_wq = NULL; } @@ -2890,7 +2943,6 @@ fail: static void rvu_flr_wq_destroy(struct rvu *rvu) { if (rvu->flr_wq) { - flush_workqueue(rvu->flr_wq); destroy_workqueue(rvu->flr_wq); rvu->flr_wq = NULL; } @@ -3186,6 +3238,10 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&rvu->rswitch.switch_lock); + if (rvu->fwdata) + ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, + rvu->fwdata->ptp_ext_tstamp); + return 0; err_dl: rvu_unregister_dl(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 1d9411232f1d..58b166698fa5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -220,6 +220,7 @@ struct rvu_pfvf { u16 maxlen; u16 minlen; + bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */ u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */ @@ -237,6 +238,7 @@ struct rvu_pfvf { bool cgx_in_use; /* this PF/VF using CGX? 
*/ int cgx_users; /* number of cgx users - used only by PFs */ + int intf_mode; u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ @@ -394,7 +396,9 @@ struct rvu_fwdata { u64 mcam_addr; u64 mcam_sz; u64 msixtr_base; -#define FWDATA_RESERVED_MEM 1023 + u32 ptp_ext_clk_rate; + u32 ptp_ext_tstamp; +#define FWDATA_RESERVED_MEM 1022 u64 reserved[FWDATA_RESERVED_MEM]; #define CGX_MAX 5 #define CGX_LMACS_MAX 4 @@ -656,6 +660,8 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); int rvu_get_num_lbk_chans(void); +int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, + u16 global_slot, u16 *slot_in_block); /* RVU HW reg validation */ enum regmap_block { @@ -794,6 +800,7 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, struct mcam_entry *entry, u8 *intf, u8 *ena); +bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc); bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); @@ -827,4 +834,7 @@ void rvu_switch_enable(struct rvu *rvu); void rvu_switch_disable(struct rvu *rvu); void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); +int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, + u64 pkind, u8 var_len_off, u8 var_len_off_mask, + u8 shift_dir); #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 81e8ea9ee30e..2ca182a4ce82 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -324,7 +324,6 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu) static void rvu_cgx_wq_destroy(struct rvu *rvu) { if (rvu->cgx_evh_wq) { - flush_workqueue(rvu->cgx_evh_wq); destroy_workqueue(rvu->cgx_evh_wq); rvu->cgx_evh_wq = NULL; } @@ -411,7 +410,7 @@ int rvu_cgx_exit(struct rvu *rvu) * VF's of mapped PF and other PFs are not allowed. This fn() checks * whether a PFFUNC is permitted to do the config or not. 
*/ -static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) +inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) { if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) @@ -694,7 +693,9 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -711,13 +712,16 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); - cgx_lmac_ptp_config(cgxd, lmac_id, enable); + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, true); /* If PTP is enabled then inform NPC that packets to be * parsed by this PF will have their data shifted by 8 bytes * and if PTP is disabled then no shift is required */ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable)) return -EINVAL; + /* This flag is required to clean up CGX conf if app gets killed */ + pfvf->hw_rx_tstamp_en = enable; return 0; } @@ -725,6 +729,9 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { + if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc))) + return -EPERM; + return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 46a41cfff575..7dbbc115cde4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -334,8 +334,8 @@ int rvu_set_channels_base(struct rvu *rvu) /* Out of 4096 channels start CPT from 2048 so * that MSB for CPT channels is always set */ - if (cpt_chan_base <= 0x800) { - hw->cpt_chan_base = 0x800; + if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) { + hw->cpt_chan_base = NIX_CHAN_CPT_CH_START; } else { dev_err(rvu->dev, "CPT channels could not fit in the range 2048-4095\n"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 1f90a7403392..267d092b8e97 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -197,6 +197,141 @@ int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req, return ret; } +static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf, + struct cpt_inline_ipsec_cfg_msg *req) +{ + u16 sso_pf_func = req->sso_pf_func; + u8 nix_sel; + u64 val; + + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + if (req->enable && (val & BIT_ULL(16))) { + /* IPSec inline outbound path is already enabled for a given + * CPT LF, HRM states that inline inbound & outbound paths + * must not be enabled at the same time for a given CPT LF + */ + return CPT_AF_ERR_INLINE_IPSEC_INB_ENA; + } + /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */ + if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO)) + return CPT_AF_ERR_SSO_PF_FUNC_INVALID; + + nix_sel = (blkaddr == BLKADDR_CPT1) ? 
1 : 0; + /* Enable CPT LF for IPsec inline inbound operations */ + if (req->enable) + val |= BIT_ULL(9); + else + val &= ~BIT_ULL(9); + + val |= (u64)nix_sel << 8; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); + + if (sso_pf_func) { + /* Set SSO_PF_FUNC */ + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); + val |= (u64)sso_pf_func << 32; + val |= (u64)req->nix_pf_func << 48; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); + } + if (req->sso_pf_func_ovrd) + /* Set SSO_PF_FUNC_OVRD for inline IPSec */ + rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1); + + /* Configure the X2P Link register with the cpt base channel number and + * range of channels it should propagate to X2P + */ + if (!is_rvu_otx2(rvu)) { + val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16); + val |= rvu->hw->cpt_chan_base; + + rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val); + rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val); + } + + return 0; +} + +static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf, + struct cpt_inline_ipsec_cfg_msg *req) +{ + u16 nix_pf_func = req->nix_pf_func; + int nix_blkaddr; + u8 nix_sel; + u64 val; + + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + if (req->enable && (val & BIT_ULL(9))) { + /* IPSec inline inbound path is already enabled for a given + * CPT LF, HRM states that inline inbound & outbound paths + * must not be enabled at the same time for a given CPT LF + */ + return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA; + } + + /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */ + if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX)) + return CPT_AF_ERR_NIX_PF_FUNC_INVALID; + + /* Enable CPT LF for IPsec inline outbound operations */ + if (req->enable) + val |= BIT_ULL(16); + else + val &= ~BIT_ULL(16); + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); + + if (nix_pf_func) { + /* Set NIX_PF_FUNC */ + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); + val |= (u64)nix_pf_func << 48; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); + + nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func); + nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 
0 : 1; + + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + val |= (u64)nix_sel << 8; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); + } + + return 0; +} + +int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu, + struct cpt_inline_ipsec_cfg_msg *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + int cptlf, blkaddr, ret; + u16 actual_slot; + + blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc, + req->slot, &actual_slot); + if (blkaddr < 0) + return CPT_AF_ERR_LF_INVALID; + + block = &rvu->hw->block[blkaddr]; + + cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot); + if (cptlf < 0) + return CPT_AF_ERR_LF_INVALID; + + switch (req->dir) { + case CPT_INLINE_INBOUND: + ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req); + break; + + case CPT_INLINE_OUTBOUND: + ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req); + break; + + default: + return CPT_AF_ERR_PARAM; + } + + return ret; +} + static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) { u64 offset = req->reg_offset; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 274d3abe30eb..70bacd38a6d9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1510,13 +1510,6 @@ int rvu_register_dl(struct rvu *rvu) return -ENOMEM; } - err = devlink_register(dl); - if (err) { - dev_err(rvu->dev, "devlink register failed with error %d\n", err); - devlink_free(dl); - return err; - } - rvu_dl = devlink_priv(dl); rvu_dl->dl = dl; rvu_dl->rvu = rvu; @@ -1537,13 +1530,11 @@ int rvu_register_dl(struct rvu *rvu) goto err_dl_health; } - devlink_params_publish(dl); - + devlink_register(dl); return 0; err_dl_health: rvu_health_reporters_destroy(rvu); - devlink_unregister(dl); devlink_free(dl); return err; } @@ -1553,12 +1544,9 @@ void rvu_unregister_dl(struct rvu *rvu) struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct devlink *dl = rvu_dl->dl; - if (!dl) - return; - + devlink_unregister(dl); devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params)); rvu_health_reporters_destroy(rvu); - devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 9ef4e942e31e..67feb26792e4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -28,6 +28,7 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, u32 leaf_prof); +static const char *nix_get_ctx_name(int ctype); enum mc_tbl_sz { MC_TBL_SZ_256, @@ -1061,10 +1062,68 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, return 0; } +static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, + struct nix_aq_enq_req *req, u8 ctype) +{ + struct nix_cn10k_aq_enq_req aq_req; + struct nix_cn10k_aq_enq_rsp aq_rsp; + int rc, word; + + if (req->ctype != NIX_AQ_CTYPE_CQ) + return 0; + + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, + req->hdr.pcifunc, ctype, req->qidx); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n", + __func__, nix_get_ctx_name(ctype), req->qidx, + req->hdr.pcifunc); + return rc; + } + + /* Make copy of original 
context & mask which are required + * for resubmission + */ + memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); + memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); + + /* exclude fields which HW can update */ + aq_req.cq_mask.cq_err = 0; + aq_req.cq_mask.wrptr = 0; + aq_req.cq_mask.tail = 0; + aq_req.cq_mask.head = 0; + aq_req.cq_mask.avg_level = 0; + aq_req.cq_mask.update_time = 0; + aq_req.cq_mask.substream = 0; + + /* Context mask (cq_mask) holds mask value of fields which + * are changed in AQ WRITE operation. + * for example cq.drop = 0xa; + * cq_mask.drop = 0xff; + * Below logic performs '&' between cq and cq_mask so that non + * updated fields are masked out for request and response + * comparison + */ + for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); + word++) { + *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= + (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); + *(u64 *)((u8 *)&aq_req.cq + word * 8) &= + (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); + } + + if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) + return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; + + return 0; +} + static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { struct nix_hw *nix_hw; + int err, retries = 5; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); @@ -1075,7 +1134,24 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; - return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); +retry: + err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); + + /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' + * As a workaround, perform CQ context read after each AQ write. If AQ + * read shows AQ write is not updated, perform AQ write again. 
+ */ + if (!err && req->op == NIX_AQ_INSTOP_WRITE) { + err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); + if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { + if (retries--) + goto retry; + else + return NIX_AF_ERR_CQ_CTX_WRITE_ERR; + } + } + + return err; } static const char *nix_get_ctx_name(int ctype) @@ -4440,6 +4516,10 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; + int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; + u8 cgx_id, lmac_id; + void *cgxd; int err; ctx_req.hdr.pcifunc = pcifunc; @@ -4476,6 +4556,22 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) dev_err(rvu->dev, "CQ ctx disable failed\n"); } + /* reset HW config done for Switch headers */ + rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, + (PKIND_TX | PKIND_RX), 0, 0, 0, 0); + + /* Disabling CGX and NPC config done for PTP */ + if (pfvf->hw_rx_tstamp_en) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); + /* Undo NPC config done for PTP */ + if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) + dev_err(rvu->dev, "NPC config for PTP failed\n"); + pfvf->hw_rx_tstamp_en = false; + } + nix_ctx_free(rvu, pfvf); nix_free_all_bandprof(rvu, pcifunc); @@ -4579,6 +4675,119 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, return 0; } +#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) +#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) +#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) +#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) + +#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) +#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) +#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) + +static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, + int blkaddr) +{ + u8 cpt_idx, cpt_blkaddr; + u64 val; + + cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; + if (req->enable) { + val = 0; + /* Enable context prefetching */ + if (!is_rvu_otx2(rvu)) + val |= BIT_ULL(51); + + /* Set OPCODE and EGRP */ + val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); + val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); + val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); + val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); + + rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); + + /* Set CPT queue for inline IPSec */ + val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); + val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, + req->inst_qsel.cpt_pf_func); + + if (!is_rvu_otx2(rvu)) { + cpt_blkaddr = (cpt_idx == 0) ? 
BLKADDR_CPT0 : + BLKADDR_CPT1; + val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); + } + + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), + val); + + /* Set CPT credit */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + req->cpt_credit); + } else { + rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), + 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + 0x3FFFFF); + } +} + +int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, + struct nix_inline_ipsec_cfg *req, + struct msg_rsp *rsp) +{ + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); + if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) + nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); + + return 0; +} + +int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, + struct nix_inline_ipsec_lf_cfg *req, + struct msg_rsp *rsp) +{ + int lf, blkaddr, err; + u64 val; + + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); + if (err) + return err; + + if (req->enable) { + /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ + val = (u64)req->ipsec_cfg0.tt << 44 | + (u64)req->ipsec_cfg0.tag_const << 20 | + (u64)req->ipsec_cfg0.sa_pow2_size << 16 | + req->ipsec_cfg0.lenm1_max; + + if (blkaddr == BLKADDR_NIX1) + val |= BIT_ULL(46); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); + + /* Set SA_IDX_W and SA_IDX_MAX */ + val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | + req->ipsec_cfg1.sa_idx_max; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); + + /* Set SA base address */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), + req->sa_base_addr); + } else { + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), + 0x0); + } + + return 0; +} void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) { bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 5efb4174e82d..bb6b42bbefa4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -3167,6 +3167,102 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, return 0; } +static int +npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind, + u8 var_len_off, u8 var_len_off_mask, u8 shift_dir) +{ + struct npc_kpu_action0 *act0; + u8 shift_count = 0; + int blkaddr; + u64 val; + + if (!var_len_off_mask) + return -EINVAL; + + if (var_len_off_mask != 0xff) { + if (shift_dir) + shift_count = __ffs(var_len_off_mask); + else + shift_count = (8 - __fls(var_len_off_mask)); + } + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc); + if (blkaddr < 0) { + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); + return -EINVAL; + } + val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind)); + act0 = (struct npc_kpu_action0 *)&val; + act0->var_len_shift = shift_count; + act0->var_len_right = shift_dir; + act0->var_len_mask = var_len_off_mask; + act0->var_len_offset = var_len_off; + rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val); + return 0; +} + +int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, + u64 pkind, u8 var_len_off, 
u8 var_len_off_mask, + u8 shift_dir) + +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int blkaddr, nixlf, rc, intf_mode; + int pf = rvu_get_pf(pcifunc); + u64 rxpkind, txpkind; + u8 cgx_id, lmac_id; + + /* use default pkind to disable edsa/higig */ + rxpkind = rvu_npc_get_pkind(rvu, pf); + txpkind = NPC_TX_DEF_PKIND; + intf_mode = NPC_INTF_MODE_DEF; + + if (mode & OTX2_PRIV_FLAGS_CUSTOM) { + if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) { + rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind, + var_len_off, + var_len_off_mask, + shift_dir); + if (rc) + return rc; + } + rxpkind = pkind; + txpkind = pkind; + } + + if (dir & PKIND_RX) { + /* rx pkind set req valid only for cgx mapped PFs */ + if (!is_cgx_config_permitted(rvu, pcifunc)) + return 0; + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, + rxpkind); + if (rc) + return rc; + } + + if (dir & PKIND_TX) { + /* Tx pkind set request valid if PCIFUNC has NIXLF attached */ + rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (rc) + return rc; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), + txpkind); + } + + pfvf->intf_mode = intf_mode; + return 0; +} + +int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req, + struct msg_rsp *rsp) +{ + return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode, + req->dir, req->pkind, req->var_len_off, + req->var_len_off_mask, req->shift_dir); +} + int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, struct msg_req *req, struct npc_mcam_read_base_rule_rsp *rsp) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 21f1ed4e222f..dbaeb10de7c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -236,6 +236,8 @@ #define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8) #define NIX_AF_RX_IPSEC_GEN_CFG (0x0300) #define NIX_AF_RX_CPTX_INST_ADDR (0x0310) +#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3) +#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3) #define NIX_AF_NDC_TX_SYNC (0x03F0) #define NIX_AF_AQ_CFG (0x0400) #define NIX_AF_AQ_BASE (0x0410) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index b92c267628b8..aaf9accc40ed 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -9,6 +9,6 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ otx2_devlink.o -rvu_nicvf-y := otx2_vf.o otx2_devlink.o +rvu_nicvf-y := otx2_vf.o otx2_devlink.o otx2_ptp.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 95f21dfdba48..fd4f083c699e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; /* Only one SMQ is allocated, map all SQ's to that SMQ */ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; - aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); + aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); aq->sq.default_chan = 
pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 78df173e6df2..66da31f30d3e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -188,7 +188,7 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf, return PTR_ERR(msghdr); } rsp = (struct nix_get_mac_addr_rsp *)msghdr; - ether_addr_copy(netdev->dev_addr, rsp->mac_addr); + eth_hw_addr_set(netdev, rsp->mac_addr); mutex_unlock(&pfvf->mbox.lock); return 0; @@ -203,7 +203,7 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); /* update dmac field in vlan offload rule */ if (netif_running(netdev) && pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) @@ -231,7 +231,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) return -ENOMEM; } - req->maxlen = pfvf->max_frs; + req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); @@ -590,7 +590,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) u64 schq, parent; u64 dwrr_val; - dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); + dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); if (!req) @@ -603,9 +603,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) /* Set topology e.t.c configuration */ if (lvl == NIX_TXSCH_LVL_SMQ) { req->reg[0] = NIX_AF_SMQX_CFG(schq); - req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8) - | OTX2_MIN_MTU; - + req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU; req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) | (0x2ULL << 36); req->num_regs++; @@ -718,7 +716,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) int timeout = 1000; ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); - for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) { incr = (u64)qidx << 32; while (timeout) { val = otx2_atomic64_add(incr, ptr); @@ -800,7 +798,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; /* Only one SMQ is allocated, map all SQ's to that SMQ */ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; - aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); + aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; @@ -835,17 +833,19 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) if (err) return err; - err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, - TSO_HEADER_SIZE); - if (err) - return err; + if (qidx < pfvf->hw.tx_queues) { + err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, + TSO_HEADER_SIZE); + if (err) + return err; + } sq->sqe_base = sq->sqe->base; sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL); if (!sq->sg) return -ENOMEM; - if (pfvf->ptp) { + if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, sizeof(*sq->timestamps)); if (err) @@ -871,20 +871,27 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 
qidx, u16 sqb_aura) static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) { struct otx2_qset *qset = &pfvf->qset; + int err, pool_id, non_xdp_queues; struct nix_aq_enq_req *aq; struct otx2_cq_queue *cq; - int err, pool_id; cq = &qset->cq[qidx]; cq->cq_idx = qidx; + non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues; if (qidx < pfvf->hw.rx_queues) { cq->cq_type = CQ_RX; cq->cint_idx = qidx; cq->cqe_cnt = qset->rqe_cnt; - } else { + if (pfvf->xdp_prog) + xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); + } else if (qidx < non_xdp_queues) { cq->cq_type = CQ_TX; cq->cint_idx = qidx - pfvf->hw.rx_queues; cq->cqe_cnt = qset->sqe_cnt; + } else { + cq->cq_type = CQ_XDP; + cq->cint_idx = qidx - non_xdp_queues; + cq->cqe_cnt = qset->sqe_cnt; } cq->cqe_size = pfvf->qset.xqe_size; @@ -991,7 +998,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf) } /* Initialize TX queues */ - for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) { u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); err = otx2_sq_init(pfvf, qidx, sqb_aura); @@ -1006,6 +1013,9 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf) return err; } + pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf, + NIX_LF_CQ_OP_STATUS); + /* Initialize work queue for receive buffer refill */ pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt, sizeof(struct refill_work), GFP_KERNEL); @@ -1035,7 +1045,7 @@ int otx2_config_nix(struct otx2_nic *pfvf) /* Set RQ/SQ/CQ counts */ nixlf->rq_cnt = pfvf->hw.rx_queues; - nixlf->sq_cnt = pfvf->hw.tx_queues; + nixlf->sq_cnt = pfvf->hw.tot_tx_queues; nixlf->cq_cnt = pfvf->qset.cq_cnt; nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; nixlf->rss_grps = MAX_RSS_GROUPS; @@ -1073,7 +1083,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf) int sqb, qidx; u64 iova, pa; - for (qidx = 0; qidx < hw->tx_queues; qidx++) { + for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { sq = &qset->sq[qidx]; if (!sq->sqb_ptrs) continue; @@ -1285,7 +1295,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) stack_pages = (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; - for (qidx = 0; qidx < hw->tx_queues; qidx++) { + for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); /* Initialize aura context */ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs); @@ -1305,7 +1315,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) goto fail; /* Allocate pointers and free them to aura/pool */ - for (qidx = 0; qidx < hw->tx_queues; qidx++) { + for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); pool = &pfvf->qset.pool[pool_id]; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index a51ecd771d07..61e52812983f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -171,6 +171,8 @@ struct otx2_hw { struct otx2_rss_info rss_info; u16 rx_queues; u16 tx_queues; + u16 xdp_queues; + u16 tot_tx_queues; u16 max_queues; u16 pool_cnt; u16 rqpool_cnt; @@ -223,6 +225,7 @@ struct otx2_hw { #define HW_TSO 0 #define CN10K_MBOX 1 #define CN10K_LMTST 2 +#define CN10K_RPM 3 unsigned long cap_flag; #define LMT_LINE_SIZE 128 @@ -263,6 +266,12 @@ struct otx2_ptp { struct cyclecounter cycle_counter; struct timecounter time_counter; + + struct delayed_work extts_work; + u64 last_extts; + u64 thresh; + + struct ptp_pin_desc 
extts_config; }; #define OTX2_HW_TIMESTAMP_LEN 8 @@ -317,7 +326,7 @@ struct otx2_nic { struct net_device *netdev; struct dev_hw_ops *hw_ops; void *iommu_domain; - u16 max_frs; + u16 tx_max_pktlen; u16 rbsize; /* Receive buffer size */ #define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0) @@ -336,7 +345,9 @@ struct otx2_nic { #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13) #define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14) u64 flags; + u64 *cq_op_addr; + struct bpf_prog *xdp_prog; struct otx2_qset qset; struct otx2_hw hw; struct pci_dev *pdev; @@ -452,6 +463,7 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) if (!is_dev_otx2(pfvf->pdev)) { __set_bit(CN10K_MBOX, &hw->cap_flag); __set_bit(CN10K_LMTST, &hw->cap_flag); + __set_bit(CN10K_RPM, &hw->cap_flag); } } @@ -825,6 +837,9 @@ int otx2_open(struct net_device *netdev); int otx2_stop(struct net_device *netdev); int otx2_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); +int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd); +int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr); + /* MCAM filter related APIs */ int otx2_mcam_flow_init(struct otx2_nic *pf); int otx2vf_mcam_flow_init(struct otx2_nic *pfvf); @@ -845,6 +860,7 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac); int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); +bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx); u16 otx2_get_max_mtu(struct otx2_nic *pfvf); /* tc support */ int otx2_init_tc(struct otx2_nic *nic); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 7ac3ef2fa06a..777a27047c8e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -108,13 +108,6 @@ int otx2_register_dl(struct otx2_nic *pfvf) return -ENOMEM; } - err = devlink_register(dl); - if (err) { - dev_err(pfvf->dev, "devlink register failed with error %d\n", err); - devlink_free(dl); - return err; - } - otx2_dl = devlink_priv(dl); otx2_dl->dl = dl; otx2_dl->pfvf = pfvf; @@ -128,12 +121,10 @@ int otx2_register_dl(struct otx2_nic *pfvf) goto err_dl; } - devlink_params_publish(dl); - + devlink_register(dl); return 0; err_dl: - devlink_unregister(dl); devlink_free(dl); return err; } @@ -141,16 +132,10 @@ err_dl: void otx2_unregister_dl(struct otx2_nic *pfvf) { struct otx2_devlink *otx2_dl = pfvf->dl; - struct devlink *dl; - - if (!otx2_dl || !otx2_dl->dl) - return; - - dl = otx2_dl->dl; + struct devlink *dl = otx2_dl->dl; + devlink_unregister(dl); devlink_params_unregister(dl, otx2_dl_params, ARRAY_SIZE(otx2_dl_params)); - - devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index dbfa3bc39e34..b0f57bda7e27 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -121,14 +121,16 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data) otx2_get_qset_strings(pfvf, &data, 0); - for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { - sprintf(data, "cgx_rxstat%d: ", stats); - data += ETH_GSTRING_LEN; - } + if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { + for (stats = 0; 
stats < CGX_RX_STATS_COUNT; stats++) { + sprintf(data, "cgx_rxstat%d: ", stats); + data += ETH_GSTRING_LEN; + } - for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { - sprintf(data, "cgx_txstat%d: ", stats); - data += ETH_GSTRING_LEN; + for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { + sprintf(data, "cgx_txstat%d: ", stats); + data += ETH_GSTRING_LEN; + } } strcpy(data, "reset_count"); @@ -205,11 +207,15 @@ static void otx2_get_ethtool_stats(struct net_device *netdev, [otx2_drv_stats[stat].index]); otx2_get_qset_stats(pfvf, stats, &data); - otx2_update_lmac_stats(pfvf); - for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) - *(data++) = pfvf->hw.cgx_rx_stats[stat]; - for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) - *(data++) = pfvf->hw.cgx_tx_stats[stat]; + + if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { + otx2_update_lmac_stats(pfvf); + for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) + *(data++) = pfvf->hw.cgx_rx_stats[stat]; + for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) + *(data++) = pfvf->hw.cgx_tx_stats[stat]; + } + *(data++) = pfvf->reset_count; fec_corr_blks = pfvf->hw.cgx_fec_corr_blks; @@ -242,18 +248,19 @@ static void otx2_get_ethtool_stats(struct net_device *netdev, static int otx2_get_sset_count(struct net_device *netdev, int sset) { struct otx2_nic *pfvf = netdev_priv(netdev); - int qstats_count; + int qstats_count, mac_stats = 0; if (sset != ETH_SS_STATS) return -EINVAL; qstats_count = otx2_n_queue_stats * (pfvf->hw.rx_queues + pfvf->hw.tx_queues); + if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) + mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT; otx2_update_lmac_fec_stats(pfvf); return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + - CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT - + 1; + mac_stats + OTX2_FEC_STATS_CNT + 1; } /* Get no of queues device supports and current queue count */ @@ -1340,6 +1347,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = { .get_pauseparam = otx2_get_pauseparam, .set_pauseparam = otx2_set_pauseparam, .get_link_ksettings = otx2vf_get_link_ksettings, + .get_ts_info = otx2_get_ts_info, }; void otx2vf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 53df7fff92c4..1e0d0c9c1dac 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -13,6 +13,8 @@ #include <linux/if_vlan.h> #include <linux/iommu.h> #include <net/ip.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -48,9 +50,15 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable); static int otx2_change_mtu(struct net_device *netdev, int new_mtu) { + struct otx2_nic *pf = netdev_priv(netdev); bool if_up = netif_running(netdev); int err = 0; + if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) { + netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", + netdev->mtu); + return -EINVAL; + } if (if_up) otx2_stop(netdev); @@ -1180,7 +1188,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) } /* SQ */ - for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) { ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT); val = otx2_atomic64_add((qidx << 44), ptr); otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) | @@ -1283,7 +1291,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf) otx2_ctx_disable(&pf->mbox, 
NIX_AQ_CTYPE_SQ, false); /* Free SQB pointers */ otx2_sq_free_sqbs(pf); - for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) { sq = &qset->sq[qidx]; qmem_free(pf->dev, sq->sqe); qmem_free(pf->dev, sq->tso_hdrs); @@ -1304,16 +1312,14 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu) * NIX transfers entire data using 6 segments/buffers and writes * a CQE_RX descriptor with those segment addresses. First segment * has additional data prepended to packet. Also software omits a - * headroom of 128 bytes and sizeof(struct skb_shared_info) in - * each segment. Hence the total size of memory needed - * to receive a packet with 'mtu' is: + * headroom of 128 bytes in each segment. Hence the total size of + * memory needed to receive a packet with 'mtu' is: * frame size = mtu + additional data; - * memory = frame_size + (headroom + struct skb_shared_info size) * 6; + * memory = frame_size + headroom * 6; * each receive buffer size = memory / 6; */ frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; - total_size = frame_size + (OTX2_HEAD_ROOM + - OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6; + total_size = frame_size + OTX2_HEAD_ROOM * 6; rbuf_size = total_size / 6; return ALIGN(rbuf_size, 2048); @@ -1332,10 +1338,11 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) * so, aura count = pool count. */ hw->rqpool_cnt = hw->rx_queues; - hw->sqpool_cnt = hw->tx_queues; + hw->sqpool_cnt = hw->tot_tx_queues; hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; - pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; + /* Maximum hardware supported transmit length */ + pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); @@ -1493,6 +1500,44 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) mutex_unlock(&mbox->lock); } +static void otx2_do_set_rx_mode(struct otx2_nic *pf) +{ + struct net_device *netdev = pf->netdev; + struct nix_rx_mode *req; + bool promisc = false; + + if (!(netdev->flags & IFF_UP)) + return; + + if ((netdev->flags & IFF_PROMISC) || + (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { + promisc = true; + } + + /* Write unicast address to mcam entries or del from mcam */ + if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT) + __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return; + } + + req->mode = NIX_RX_MODE_UCAST; + + if (promisc) + req->mode |= NIX_RX_MODE_PROMISC; + if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) + req->mode |= NIX_RX_MODE_ALLMULTI; + + req->mode |= NIX_RX_MODE_USE_MCE; + + otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); +} + int otx2_open(struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); @@ -1503,7 +1548,7 @@ int otx2_open(struct net_device *netdev) netif_carrier_off(netdev); - pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues; + pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues; /* RQ and SQs are mapped to different CQs, * so find out max CQ IRQs (i.e CINTs) needed. 
*/ @@ -1523,7 +1568,7 @@ int otx2_open(struct net_device *netdev) if (!qset->cq) goto err_free_mem; - qset->sq = kcalloc(pf->hw.tx_queues, + qset->sq = kcalloc(pf->hw.tot_tx_queues, sizeof(struct otx2_snd_queue), GFP_KERNEL); if (!qset->sq) goto err_free_mem; @@ -1544,11 +1589,20 @@ int otx2_open(struct net_device *netdev) /* RQ0 & SQ0 are mapped to CINT0 and so on.. * 'cq_ids[0]' points to RQ's CQ and * 'cq_ids[1]' points to SQ's CQ and + * 'cq_ids[2]' points to XDP's CQ and */ cq_poll->cq_ids[CQ_RX] = (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ; cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ? qidx + pf->hw.rx_queues : CINT_INVALID_CQ; + if (pf->xdp_prog) + cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ? + (qidx + pf->hw.rx_queues + + pf->hw.tx_queues) : + CINT_INVALID_CQ; + else + cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; + cq_poll->dev = (void *)pf; netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler, NAPI_POLL_WEIGHT); @@ -1646,6 +1700,8 @@ int otx2_open(struct net_device *netdev) if (err) goto err_tx_stop_queues; + otx2_do_set_rx_mode(pf); + return 0; err_tx_stop_queues: @@ -1750,7 +1806,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) /* Check for minimum and maximum packet length */ if (skb->len <= ETH_HLEN || - (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) { + (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) { dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -1791,43 +1847,11 @@ static void otx2_set_rx_mode(struct net_device *netdev) queue_work(pf->otx2_wq, &pf->rx_mode_work); } -static void otx2_do_set_rx_mode(struct work_struct *work) +static void otx2_rx_mode_wrk_handler(struct work_struct *work) { struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work); - struct net_device *netdev = pf->netdev; - struct nix_rx_mode *req; - bool promisc = false; - - if (!(netdev->flags & IFF_UP)) - return; - - if ((netdev->flags & IFF_PROMISC) || - (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { - promisc = true; - } - - /* Write unicast address to mcam entries or del from mcam */ - if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT) - __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); - - mutex_lock(&pf->mbox.lock); - req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); - if (!req) { - mutex_unlock(&pf->mbox.lock); - return; - } - - req->mode = NIX_RX_MODE_UCAST; - - if (promisc) - req->mode |= NIX_RX_MODE_PROMISC; - if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) - req->mode |= NIX_RX_MODE_ALLMULTI; - req->mode |= NIX_RX_MODE_USE_MCE; - - otx2_sync_mbox_msg(&pf->mbox); - mutex_unlock(&pf->mbox.lock); + otx2_do_set_rx_mode(pf); } static int otx2_set_features(struct net_device *netdev, @@ -1967,7 +1991,7 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable) return 0; } -static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) +int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) { struct otx2_nic *pfvf = netdev_priv(netdev); struct hwtstamp_config config; @@ -2023,8 +2047,9 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } +EXPORT_SYMBOL(otx2_config_hwtstamp); -static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) { struct otx2_nic *pfvf = netdev_priv(netdev); struct hwtstamp_config *cfg = &pfvf->tstamp; @@ -2039,6 +2064,7 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) return -EOPNOTSUPP; } } +EXPORT_SYMBOL(otx2_ioctl); static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac) { @@ -2281,6 +2307,111 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf, return 0; } +static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf, + int qidx) +{ + struct page *page; + u64 dma_addr; + int err = 0; + + dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data), + offset_in_page(xdpf->data), xdpf->len, + DMA_TO_DEVICE); + if (dma_mapping_error(pf->dev, dma_addr)) + return -ENOMEM; + + err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx); + if (!err) { + otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE); + page = virt_to_page(xdpf->data); + put_page(page); + return -ENOMEM; + } + return 0; +} + +static int otx2_xdp_xmit(struct net_device *netdev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct otx2_nic *pf = netdev_priv(netdev); + int qidx = smp_processor_id(); + struct otx2_snd_queue *sq; + int drops = 0, i; + + if (!netif_running(netdev)) + return -ENETDOWN; + + qidx += pf->hw.tx_queues; + sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL; + + /* Abort xmit if the XDP queue is not set up */ + if (unlikely(!sq)) + return -ENXIO; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = otx2_xdp_xmit_tx(pf, xdpf, qidx); + if (err) + drops++; + } + return n - drops; +} + +static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog) + { + struct net_device *dev = pf->netdev; + bool if_up = netif_running(pf->netdev); + struct bpf_prog *old_prog; + + if (prog && dev->mtu > MAX_XDP_MTU) { + netdev_warn(dev, "Jumbo frames not yet supported with XDP\n"); + return -EOPNOTSUPP; + } + + if (if_up) + otx2_stop(pf->netdev); + + old_prog = xchg(&pf->xdp_prog, prog); + + if (old_prog) + bpf_prog_put(old_prog); + + if (pf->xdp_prog) + bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1); + + /* Network stack and XDP share the same rx queues. + * Use separate tx queues for XDP and network stack. 
+ */ + if (pf->xdp_prog) + pf->hw.xdp_queues = pf->hw.rx_queues; + else + pf->hw.xdp_queues = 0; + + pf->hw.tot_tx_queues += pf->hw.xdp_queues; + + if (if_up) + otx2_open(pf->netdev); + + return 0; +} + +static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +{ + struct otx2_nic *pf = netdev_priv(netdev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return otx2_xdp_setup(pf, xdp->prog); + default: + return -EINVAL; + } +} + static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf, int req_perm) { @@ -2348,6 +2479,8 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, + .ndo_bpf = otx2_xdp, + .ndo_xdp_xmit = otx2_xdp_xmit, .ndo_setup_tc = otx2_setup_tc, .ndo_set_vf_trust = otx2_ndo_set_vf_trust, }; @@ -2358,7 +2491,7 @@ static int otx2_wq_init(struct otx2_nic *pf) if (!pf->otx2_wq) return -ENOMEM; - INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode); + INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler); INIT_WORK(&pf->reset_task, otx2_reset_task); return 0; } @@ -2489,6 +2622,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->pdev = pdev; hw->rx_queues = qcount; hw->tx_queues = qcount; + hw->tot_tx_queues = qcount; hw->max_queues = qcount; num_vec = pci_msix_vec_count(pdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index ec9e49985c2c..85b1f140d3dd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -27,6 +27,23 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) return otx2_sync_mbox_msg(&ptp->nic->mbox); } +static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh) +{ + struct ptp_req *req; + + if (!ptp->nic) + return -ENODEV; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return -ENOMEM; + + req->op = PTP_OP_SET_THRESH; + req->thresh = thresh; + + return otx2_sync_mbox_msg(&ptp->nic->mbox); +} + static u64 ptp_cc_read(const struct cyclecounter *cc) { struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter); @@ -55,6 +72,33 @@ static u64 ptp_cc_read(const struct cyclecounter *cc) return rsp->clk; } +static u64 ptp_tstmp_read(struct otx2_ptp *ptp) +{ + struct ptp_req *req; + struct ptp_rsp *rsp; + int err; + + if (!ptp->nic) + return 0; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return 0; + + req->op = PTP_OP_GET_TSTMP; + + err = otx2_sync_mbox_msg(&ptp->nic->mbox); + if (err) + return 0; + + rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, + &req->hdr); + if (IS_ERR(rsp)) + return 0; + + return rsp->clk; +} + static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, @@ -102,9 +146,73 @@ static int otx2_ptp_settime(struct ptp_clock_info *ptp_info, return 0; } +static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + break; + case PTP_PF_PEROUT: + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +static void otx2_ptp_extts_check(struct work_struct *work) +{ + struct otx2_ptp *ptp = container_of(work, struct otx2_ptp, + extts_work.work); + struct ptp_clock_event event; + u64 tstmp, new_thresh; + + mutex_lock(&ptp->nic->mbox.lock); 
+ tstmp = ptp_tstmp_read(ptp); + mutex_unlock(&ptp->nic->mbox.lock); + + if (tstmp != ptp->last_extts) { + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp); + ptp_clock_event(ptp->ptp_clock, &event); + ptp->last_extts = tstmp; + + new_thresh = tstmp % 500000000; + if (ptp->thresh != new_thresh) { + mutex_lock(&ptp->nic->mbox.lock); + ptp_set_thresh(ptp, new_thresh); + mutex_unlock(&ptp->nic->mbox.lock); + ptp->thresh = new_thresh; + } + } + schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); +} + static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, int on) { + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + int pin; + + if (!ptp->nic) + return -ENODEV; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + if (on) + schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); + else + cancel_delayed_work_sync(&ptp->extts_work); + return 0; + default: + break; + } return -EOPNOTSUPP; } @@ -115,6 +223,11 @@ int otx2_ptp_init(struct otx2_nic *pfvf) struct ptp_req *req; int err; + if (is_otx2_lbkvf(pfvf->pdev)) { + pfvf->ptp = NULL; + return 0; + } + mutex_lock(&pfvf->mbox.lock); /* check if PTP block is available */ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox); @@ -149,20 +262,28 @@ int otx2_ptp_init(struct otx2_nic *pfvf) timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter, ktime_to_ns(ktime_get_real())); + snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP"); + ptp_ptr->extts_config.index = 0; + ptp_ptr->extts_config.func = PTP_PF_NONE; + ptp_ptr->ptp_info = (struct ptp_clock_info) { .owner = THIS_MODULE, .name = "OcteonTX2 PTP", .max_adj = 1000000000ull, - .n_ext_ts = 0, - .n_pins = 0, + .n_ext_ts = 1, + .n_pins = 1, .pps = 0, + .pin_config = &ptp_ptr->extts_config, .adjfine = otx2_ptp_adjfine, .adjtime = otx2_ptp_adjtime, .gettime64 = otx2_ptp_gettime, .settime64 = otx2_ptp_settime, .enable = otx2_ptp_enable, + .verify = otx2_ptp_verify_pin, }; + INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check); + ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev); if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) { err = ptp_ptr->ptp_clock ? 
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index f42b1d4e0c67..0cc6353254bf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -8,6 +8,8 @@ #include <linux/etherdevice.h> #include <net/ip.h> #include <net/tso.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -17,6 +19,35 @@ #include "cn10k.h" #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx))) +static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, + struct bpf_prog *prog, + struct nix_cqe_rx_s *cqe, + struct otx2_cq_queue *cq); + +static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, + struct otx2_cq_queue *cq) +{ + u64 incr = (u64)(cq->cq_idx) << 32; + u64 status; + + status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr); + + if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) || + status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) { + dev_err(pfvf->dev, "CQ stopped due to error"); + return -EINVAL; + } + + cq->cq_tail = status & 0xFFFFF; + cq->cq_head = (status >> 20) & 0xFFFFF; + if (cq->cq_tail < cq->cq_head) + cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) + + cq->cq_tail; + else + cq->pend_cqe = cq->cq_tail - cq->cq_head; + + return 0; +} static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq) { @@ -73,6 +104,24 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) sg->num_segs = 0; } +static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, + struct otx2_snd_queue *sq, + struct nix_cqe_tx_s *cqe) +{ + struct nix_send_comp_s *snd_comp = &cqe->comp; + struct sg_list *sg; + struct page *page; + u64 pa; + + sg = &sq->sg[snd_comp->sqe_id]; + + pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]); + otx2_dma_unmap_page(pfvf, sg->dma_addr[0], + sg->size[0], DMA_TO_DEVICE); + page = virt_to_page(phys_to_virt(pa)); + put_page(page); +} + static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, struct otx2_snd_queue *sq, @@ -132,8 +181,9 @@ static void otx2_set_rxtstamp(struct otx2_nic *pfvf, skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns); } -static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, - u64 iova, int len, struct nix_rx_parse_s *parse) +static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, + u64 iova, int len, struct nix_rx_parse_s *parse, + int qidx) { struct page *page; int off = 0; @@ -154,11 +204,22 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, } page = virt_to_page(va); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - va - page_address(page) + off, len - off, pfvf->rbsize); + if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) { + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + va - page_address(page) + off, + len - off, pfvf->rbsize); + + otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, + pfvf->rbsize, DMA_FROM_DEVICE); + return true; + } - otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, - pfvf->rbsize, DMA_FROM_DEVICE); + /* If more than MAX_SKB_FRAGS fragments are received then + * give back those buffer pointers to hardware for reuse. 
+ */ + pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL); + + return false; } static void otx2_set_rxhash(struct otx2_nic *pfvf, @@ -285,6 +346,10 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, return; } + if (pfvf->xdp_prog) + if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq)) + return; + skb = napi_get_frags(napi); if (unlikely(!skb)) return; @@ -296,9 +361,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, seg_addr = &sg->seg_addr; seg_size = (void *)sg; for (seg = 0; seg < sg->segs; seg++, seg_addr++) { - otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg], - parse); - cq->pool_ptrs++; + if (otx2_skb_add_frag(pfvf, skb, *seg_addr, + seg_size[seg], parse, cq->cq_idx)) + cq->pool_ptrs++; } start += sizeof(*sg); } @@ -318,7 +383,14 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe; int processed_cqe = 0; - while (likely(processed_cqe < budget)) { + if (cq->pend_cqe >= budget) + goto process_cqe; + + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return 0; + +process_cqe: + while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head); if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID || !cqe->sg.seg_addr) { @@ -334,17 +406,13 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf, cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; cqe->sg.seg_addr = 0x00; processed_cqe++; + cq->pend_cqe--; } /* Free CQEs to HW */ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); - if (unlikely(!cq->pool_ptrs)) - return 0; - /* Refill pool with new buffers */ - pfvf->hw_ops->refill_pool_ptrs(pfvf, cq); - return processed_cqe; } @@ -364,22 +432,36 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) static int otx2_tx_napi_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int budget) { - int tx_pkts = 0, tx_bytes = 0; + int tx_pkts = 0, tx_bytes = 0, qidx; struct nix_cqe_tx_s *cqe; int processed_cqe = 0; - while (likely(processed_cqe < budget)) { + if (cq->pend_cqe >= budget) + goto process_cqe; + + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return 0; + +process_cqe: + while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); if (unlikely(!cqe)) { if (!processed_cqe) return 0; break; } - otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx], - cqe, budget, &tx_pkts, &tx_bytes); - + if (cq->cq_type == CQ_XDP) { + qidx = cq->cq_idx - pfvf->hw.rx_queues; + otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx], + cqe); + } else { + otx2_snd_pkt_handler(pfvf, cq, + &pfvf->qset.sq[cq->cint_idx], + cqe, budget, &tx_pkts, &tx_bytes); + } cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; processed_cqe++; + cq->pend_cqe--; } /* Free CQEs to HW */ @@ -402,6 +484,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, int otx2_napi_handler(struct napi_struct *napi, int budget) { + struct otx2_cq_queue *rx_cq = NULL; struct otx2_cq_poll *cq_poll; int workdone = 0, cq_idx, i; struct otx2_cq_queue *cq; @@ -412,17 +495,13 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) pfvf = (struct otx2_nic *)cq_poll->dev; qset = &pfvf->qset; - for (i = CQS_PER_CINT - 1; i >= 0; i--) { + for (i = 0; i < CQS_PER_CINT; i++) { cq_idx = cq_poll->cq_ids[i]; if (unlikely(cq_idx == CINT_INVALID_CQ)) continue; cq = &qset->cq[cq_idx]; if (cq->cq_type == CQ_RX) { - /* If the RQ refill WQ task is running, skip napi - * scheduler for this queue. 
- */ - if (cq->refill_task_sched) - continue; + rx_cq = cq; workdone += otx2_rx_napi_handler(pfvf, napi, cq, budget); } else { @@ -430,6 +509,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) } } + if (rx_cq && rx_cq->pool_ptrs) + pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); /* Clear the IRQ */ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); @@ -936,10 +1017,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) int processed_cqe = 0; u64 iova, pa; - while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) { - if (!cqe->sg.subdc) - continue; + if (pfvf->xdp_prog) + xdp_rxq_info_unreg(&cq->xdp_rxq); + + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return; + + while (cq->pend_cqe) { + cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq); processed_cqe++; + cq->pend_cqe--; + + if (!cqe) + continue; if (cqe->sg.segs > 1) { otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx); continue; @@ -965,7 +1055,16 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) sq = &pfvf->qset.sq[cq->cint_idx]; - while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) { + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return; + + while (cq->pend_cqe) { + cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); + processed_cqe++; + cq->pend_cqe--; + + if (!cqe) + continue; sg = &sq->sg[cqe->comp.sqe_id]; skb = (struct sk_buff *)sg->skb; if (skb) { @@ -973,7 +1072,6 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) dev_kfree_skb_any(skb); sg->skb = (u64)NULL; } - processed_cqe++; } /* Free CQEs to HW */ @@ -1001,3 +1099,116 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) mutex_unlock(&pfvf->mbox.lock); return err; } + +static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr, + int len, int *offset) +{ + struct nix_sqe_sg_s *sg = NULL; + u64 *iova = NULL; + + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 1; + sg->seg1_size = len; + iova = (void *)sg + sizeof(*sg); + *iova = dma_addr; + *offset += sizeof(*sg) + sizeof(u64); + + sq->sg[sq->head].dma_addr[0] = dma_addr; + sq->sg[sq->head].size[0] = len; + sq->sg[sq->head].num_segs = 1; +} + +bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) +{ + struct nix_sqe_hdr_s *sqe_hdr; + struct otx2_snd_queue *sq; + int offset, free_sqe; + + sq = &pfvf->qset.sq[qidx]; + free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; + if (free_sqe < sq->sqe_thresh) + return false; + + memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); + + sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); + + if (!sqe_hdr->total) { + sqe_hdr->aura = sq->aura_id; + sqe_hdr->df = 1; + sqe_hdr->sq = qidx; + sqe_hdr->pnc = 1; + } + sqe_hdr->total = len; + sqe_hdr->sqe_id = sq->head; + + offset = sizeof(*sqe_hdr); + + otx2_xdp_sqe_add_sg(sq, iova, len, &offset); + sqe_hdr->sizem1 = (offset / 16) - 1; + pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); + + return true; +} + +static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, + struct bpf_prog *prog, + struct nix_cqe_rx_s *cqe, + struct otx2_cq_queue *cq) +{ + unsigned char *hard_start, *data; + int qidx = cq->cq_idx; + struct xdp_buff xdp; + struct page *page; + u64 iova, pa; + u32 act; + int err; + + iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; + pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); + page = virt_to_page(phys_to_virt(pa)); + + xdp_init_buff(&xdp, pfvf->rbsize, 
&cq->xdp_rxq); + + data = (unsigned char *)phys_to_virt(pa); + hard_start = page_address(page); + xdp_prepare_buff(&xdp, hard_start, data - hard_start, + cqe->sg.seg_size, false); + + act = bpf_prog_run_xdp(prog, &xdp); + + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + qidx += pfvf->hw.tx_queues; + cq->pool_ptrs++; + return otx2_xdp_sq_append_pkt(pfvf, iova, + cqe->sg.seg_size, qidx); + case XDP_REDIRECT: + cq->pool_ptrs++; + err = xdp_do_redirect(pfvf->netdev, &xdp, prog); + + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, + DMA_FROM_DEVICE); + if (!err) + return true; + put_page(page); + break; + default: + bpf_warn_invalid_xdp_action(act); + break; + case XDP_ABORTED: + trace_xdp_exception(pfvf->netdev, prog, act); + break; + case XDP_DROP: + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, + DMA_FROM_DEVICE); + put_page(page); + cq->pool_ptrs++; + return true; + } + return false; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 3ff1ad79c001..f1a04cf9210c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -11,6 +11,7 @@ #include <linux/etherdevice.h> #include <linux/iommu.h> #include <linux/if_vlan.h> +#include <net/xdp.h> #define LBK_CHAN_BASE 0x000 #define SDP_CHAN_BASE 0x700 @@ -25,6 +26,8 @@ #define OTX2_MAX_GSO_SEGS 255 #define OTX2_MAX_FRAGS_IN_SQE 9 +#define MAX_XDP_MTU (1530 - OTX2_ETH_HLEN) + /* Rx buffer size should be in multiples of 128bytes */ #define RCV_FRAG_LEN1(x) \ ((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \ @@ -36,9 +39,7 @@ #define RCV_FRAG_LEN(x) \ ((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x)) -#define DMA_BUFFER_LEN(x) \ - ((x) - OTX2_HEAD_ROOM - \ - OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define DMA_BUFFER_LEN(x) ((x) - OTX2_HEAD_ROOM) /* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT] * is equal to this value. 
@@ -56,6 +57,9 @@ */ #define CQ_QCOUNT_DEFAULT 1 +#define CQ_OP_STAT_OP_ERR 63 +#define CQ_OP_STAT_CQ_ERR 46 + struct queue_stats { u64 bytes; u64 pkts; @@ -96,7 +100,8 @@ struct otx2_snd_queue { enum cq_type { CQ_RX, CQ_TX, - CQS_PER_CINT = 2, /* RQ + SQ */ + CQ_XDP, + CQS_PER_CINT = 3, /* RQ + SQ + XDP */ }; struct otx2_cq_poll { @@ -122,9 +127,12 @@ struct otx2_cq_queue { u16 pool_ptrs; u32 cqe_cnt; u32 cq_head; + u32 cq_tail; + u32 pend_cqe; void *cqe_base; struct qmem *cqe; struct otx2_pool *rbpool; + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; struct otx2_qset { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 03b4ec630432..e6cb8cd0787d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -8,9 +8,11 @@ #include <linux/etherdevice.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/net_tstamp.h> #include "otx2_common.h" #include "otx2_reg.h" +#include "otx2_ptp.h" #include "cn10k.h" #define DRV_NAME "rvu_nicvf" @@ -277,7 +279,6 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf) struct mbox *mbox = &vf->mbox; if (vf->mbox_wq) { - flush_workqueue(vf->mbox_wq); destroy_workqueue(vf->mbox_wq); vf->mbox_wq = NULL; } @@ -500,6 +501,7 @@ static const struct net_device_ops otx2vf_netdev_ops = { .ndo_set_features = otx2vf_set_features, .ndo_get_stats64 = otx2_get_stats64, .ndo_tx_timeout = otx2_tx_timeout, + .ndo_do_ioctl = otx2_ioctl, }; static int otx2_wq_init(struct otx2_nic *vf) @@ -583,6 +585,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->rx_queues = qcount; hw->tx_queues = qcount; hw->max_queues = qcount; + hw->tot_tx_queues = qcount; hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, GFP_KERNEL); @@ -640,6 +643,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; + /* Don't check for error. 
Proceed without ptp */ + otx2_ptp_init(vf); + /* Assign default mac address */ otx2_get_mac_from_af(netdev); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c index 68b442eb6d69..06279cd6da67 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c @@ -345,8 +345,6 @@ static struct prestera_trap prestera_trap_items_arr[] = { }, }; -static void prestera_devlink_traps_fini(struct prestera_switch *sw); - static int prestera_drop_counter_get(struct devlink *devlink, const struct devlink_trap *trap, u64 *p_drops); @@ -381,8 +379,6 @@ static int prestera_trap_action_set(struct devlink *devlink, enum devlink_trap_action action, struct netlink_ext_ack *extack); -static int prestera_devlink_traps_register(struct prestera_switch *sw); - static const struct devlink_ops prestera_dl_ops = { .info_get = prestera_dl_info_get, .trap_init = prestera_trap_init, @@ -407,38 +403,18 @@ void prestera_devlink_free(struct prestera_switch *sw) devlink_free(dl); } -int prestera_devlink_register(struct prestera_switch *sw) +void prestera_devlink_register(struct prestera_switch *sw) { struct devlink *dl = priv_to_devlink(sw); - int err; - - err = devlink_register(dl); - if (err) { - dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err); - return err; - } - err = prestera_devlink_traps_register(sw); - if (err) { - devlink_unregister(dl); - dev_err(sw->dev->dev, "devlink_traps_register failed: %d\n", - err); - return err; - } - - return 0; + devlink_register(dl); } void prestera_devlink_unregister(struct prestera_switch *sw) { - struct prestera_trap_data *trap_data = sw->trap_data; struct devlink *dl = priv_to_devlink(sw); - prestera_devlink_traps_fini(sw); devlink_unregister(dl); - - kfree(trap_data->trap_items_arr); - kfree(trap_data); } int prestera_devlink_port_register(struct prestera_port *port) @@ -486,7 +462,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev) return &port->dl_port; } -static int prestera_devlink_traps_register(struct prestera_switch *sw) +int prestera_devlink_traps_register(struct prestera_switch *sw) { const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr); const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr); @@ -625,8 +601,9 @@ static int prestera_drop_counter_get(struct devlink *devlink, cpu_code_type, p_drops); } -static void prestera_devlink_traps_fini(struct prestera_switch *sw) +void prestera_devlink_traps_unregister(struct prestera_switch *sw) { + struct prestera_trap_data *trap_data = sw->trap_data; struct devlink *dl = priv_to_devlink(sw); const struct devlink_trap *trap; int i; @@ -638,4 +615,6 @@ static void prestera_devlink_traps_fini(struct prestera_switch *sw) devlink_trap_groups_unregister(dl, prestera_trap_groups_arr, ARRAY_SIZE(prestera_trap_groups_arr)); + kfree(trap_data->trap_items_arr); + kfree(trap_data); } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h index cc34c3db13a2..b322295bad3a 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h @@ -9,7 +9,7 @@ struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev); void prestera_devlink_free(struct prestera_switch *sw); -int prestera_devlink_register(struct prestera_switch *sw); +void prestera_devlink_register(struct prestera_switch *sw); void 
prestera_devlink_unregister(struct prestera_switch *sw); int prestera_devlink_port_register(struct prestera_port *port); @@ -22,5 +22,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev); void prestera_devlink_trap_report(struct prestera_port *port, struct sk_buff *skb, u8 cpu_code); +int prestera_devlink_traps_register(struct prestera_switch *sw); +void prestera_devlink_traps_unregister(struct prestera_switch *sw); #endif /* _PRESTERA_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 44c670807fb3..b667f560b931 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -137,7 +137,7 @@ static int prestera_port_set_mac_address(struct net_device *dev, void *p) if (err) return err; - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -851,7 +851,7 @@ static int prestera_switch_init(struct prestera_switch *sw) if (err) goto err_span_init; - err = prestera_devlink_register(sw); + err = prestera_devlink_traps_register(sw); if (err) goto err_dl_register; @@ -863,12 +863,13 @@ static int prestera_switch_init(struct prestera_switch *sw) if (err) goto err_ports_create; + prestera_devlink_register(sw); return 0; err_ports_create: prestera_lag_fini(sw); err_lag_init: - prestera_devlink_unregister(sw); + prestera_devlink_traps_unregister(sw); err_dl_register: prestera_span_fini(sw); err_span_init: @@ -888,9 +889,10 @@ err_swdev_register: static void prestera_switch_fini(struct prestera_switch *sw) { + prestera_devlink_unregister(sw); prestera_destroy_ports(sw); prestera_lag_fini(sw); - prestera_devlink_unregister(sw); + prestera_devlink_traps_unregister(sw); prestera_span_fini(sw); prestera_acl_fini(sw); prestera_event_handlers_unregister(sw); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index fab53c9b8380..bb5341020803 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -389,7 +389,7 @@ static void inverse_every_nibble(unsigned char *mac_addr) * Outputs * return the calculated entry. 
*/ -static u32 hash_function(unsigned char *mac_addr_orig) +static u32 hash_function(const unsigned char *mac_addr_orig) { u32 hash_result; u32 addr0; @@ -434,7 +434,7 @@ static u32 hash_function(unsigned char *mac_addr_orig) * -ENOSPC if table full */ static int add_del_hash_entry(struct pxa168_eth_private *pep, - unsigned char *mac_addr, + const unsigned char *mac_addr, u32 rd, u32 skip, int del) { struct addr_table_entry *entry, *start; @@ -521,7 +521,7 @@ static int add_del_hash_entry(struct pxa168_eth_private *pep, */ static void update_hash_table_mac_address(struct pxa168_eth_private *pep, unsigned char *oaddr, - unsigned char *addr) + const unsigned char *addr) { /* Delete old entry */ if (oaddr) @@ -607,7 +607,7 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; memcpy(oldMac, dev->dev_addr, ETH_ALEN); - memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, sa->sa_data); mac_h = dev->dev_addr[0] << 24; mac_h |= dev->dev_addr[1] << 16; @@ -1434,7 +1434,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); - err = of_get_mac_address(pdev->dev.of_node, dev->dev_addr); + err = of_get_ethdev_address(pdev->dev.of_node, dev); if (err) { /* try reading the mac address, if set by the bootloader */ pxa168_eth_get_mac_address(dev, dev->dev_addr); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 051dd3fb5b03..ac48dcca268c 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3459,7 +3459,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); if (!netif_running(dev)) { memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index e9fc74e54b22..0da18b3f1c01 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -3817,7 +3817,7 @@ static int sky2_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); memcpy_toio(hw->regs + B2_MAC_1 + port * 8, dev->dev_addr, ETH_ALEN); memcpy_toio(hw->regs + B2_MAC_2 + port * 8, @@ -4440,86 +4440,6 @@ static const struct ethtool_ops sky2_ethtool_ops = { static struct dentry *sky2_debug; - -/* - * Read and parse the first part of Vital Product Data - */ -#define VPD_SIZE 128 -#define VPD_MAGIC 0x82 - -static const struct vpd_tag { - char tag[2]; - char *label; -} vpd_tags[] = { - { "PN", "Part Number" }, - { "EC", "Engineering Level" }, - { "MN", "Manufacturer" }, - { "SN", "Serial Number" }, - { "YA", "Asset Tag" }, - { "VL", "First Error Log Message" }, - { "VF", "Second Error Log Message" }, - { "VB", "Boot Agent ROM Configuration" }, - { "VE", "EFI UNDI Configuration" }, -}; - -static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw) -{ - size_t vpd_size; - loff_t offs; - u8 len; - unsigned char *buf; - u16 reg2; - - reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); - vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); - - seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev)); - buf = kmalloc(vpd_size, GFP_KERNEL); - if (!buf) { - seq_puts(seq, "no 
memory!\n"); - return; - } - - if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) { - seq_puts(seq, "VPD read failed\n"); - goto out; - } - - if (buf[0] != VPD_MAGIC) { - seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]); - goto out; - } - len = buf[1]; - if (len == 0 || len > vpd_size - 4) { - seq_printf(seq, "Invalid id length: %d\n", len); - goto out; - } - - seq_printf(seq, "%.*s\n", len, buf + 3); - offs = len + 3; - - while (offs < vpd_size - 4) { - int i; - - if (!memcmp("RW", buf + offs, 2)) /* end marker */ - break; - len = buf[offs + 2]; - if (offs + len + 3 >= vpd_size) - break; - - for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) { - if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) { - seq_printf(seq, " %s: %.*s\n", - vpd_tags[i].label, len, buf + offs + 3); - break; - } - } - offs += len + 3; - } -out: - kfree(buf); -} - static int sky2_debug_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; @@ -4529,9 +4449,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) unsigned idx, last; int sop; - sky2_show_vpd(seq, hw); - - seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n", + seq_printf(seq, "IRQ src=%x mask=%x control=%x\n", sky2_read32(hw, B0_ISRC), sky2_read32(hw, B0_IMSK), sky2_read32(hw, B0_Y2_SP_ICR)); @@ -4802,7 +4720,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, * 1) from device tree data * 2) from internal registers set by bootloader */ - ret = of_get_mac_address(hw->pdev->dev.of_node, dev->dev_addr); + ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev); if (ret) memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 398c23cec815..75d67d1b5f6b 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2588,7 +2588,7 @@ static int __init mtk_init(struct net_device *dev) struct mtk_eth *eth = mac->hw; int ret; - ret = of_get_mac_address(mac->of_node, dev->dev_addr); + ret = of_get_ethdev_address(mac->of_node, dev); if (ret) { /* If the mac address is invalid, use random mac address */ eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 1d5dd2015453..89ca7960b225 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -523,7 +523,7 @@ static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv) static void mtk_star_set_mac_addr(struct net_device *ndev) { struct mtk_star_priv *priv = netdev_priv(ndev); - u8 *mac_addr = ndev->dev_addr; + const u8 *mac_addr = ndev->dev_addr; unsigned int high, low; high = mac_addr[0] << 8 | mac_addr[1] << 0; @@ -1544,7 +1544,7 @@ static int mtk_star_probe(struct platform_device *pdev) if (ret) return ret; - ret = eth_platform_get_mac_address(dev, ndev->dev_addr); + ret = platform_get_ethdev_address(dev, ndev); if (ret || !is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 8d751383530b..e10b7b04b894 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2480,7 +2480,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) return 0; err_thread: - flush_workqueue(priv->mfunc.master.comm_wq); destroy_workqueue(priv->mfunc.master.comm_wq); err_slaves: while (i--) { @@ -2587,7 +2586,6 @@ void 
mlx4_multi_func_cleanup(struct mlx4_dev *dev) int i, port; if (mlx4_is_master(dev)) { - flush_workqueue(priv->mfunc.master.comm_wq); destroy_workqueue(priv->mfunc.master.comm_wq); for (i = 0; i < dev->num_slaves; i++) { for (port = 1; port <= MLX4_MAX_PORTS; port++) @@ -3009,7 +3007,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac) return -EPERM; } - s_info->mac = mlx4_mac_to_u64(mac); + s_info->mac = ether_addr_to_u64(mac); mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n", vf, port, s_info->mac); return 0; @@ -3195,7 +3193,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; - mlx4_u64_to_mac(mac, s_info->mac); + u64_to_ether_addr(s_info->mac, mac); if (setting && !is_valid_ether_addr(mac)) { mlx4_info(dev, "Illegal MAC with spoofchk\n"); return -EPERM; diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index f7053a74e6a8..4d4f9cf9facb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -314,7 +314,8 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size) buf += PAGE_SIZE; } } else { - err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ? + err = copy_to_user((void __user *)buf, init_ents, + array_size(entries, cqe_size)) ? -EFAULT : 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ef518b1040f7..66c8ae29bc7a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -197,6 +197,8 @@ static const char main_strings[][ETH_GSTRING_LEN] = { /* xdp statistics */ "rx_xdp_drop", + "rx_xdp_redirect", + "rx_xdp_redirect_fail", "rx_xdp_tx", "rx_xdp_tx_full", @@ -428,6 +430,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, data[index++] = priv->rx_ring[i]->bytes; data[index++] = priv->rx_ring[i]->dropped; data[index++] = priv->rx_ring[i]->xdp_drop; + data[index++] = priv->rx_ring[i]->xdp_redirect; + data[index++] = priv->rx_ring[i]->xdp_redirect_fail; data[index++] = priv->rx_ring[i]->xdp_tx; data[index++] = priv->rx_ring[i]->xdp_tx_full; } @@ -520,6 +524,10 @@ static void mlx4_en_get_strings(struct net_device *dev, sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_xdp_drop", i); sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_xdp_redirect", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_xdp_redirect_fail", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_xdp_tx", i); sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_xdp_tx_full", i); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 109472d6b61f..f1259bdb1a29 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -237,7 +237,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) if (mdev->pndev[i]) mlx4_en_destroy_netdev(mdev->pndev[i]); - flush_workqueue(mdev->workqueue); destroy_workqueue(mdev->workqueue); (void) mlx4_mr_free(dev, &mdev->mr); iounmap(mdev->uar_map); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 8af7f2827322..3f6d5c384637 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c 
@@ -527,18 +527,17 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, return err; } -static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) +static void mlx4_en_u64_to_mac(struct net_device *dev, u64 src_mac) { - int i; - for (i = ETH_ALEN - 1; i >= 0; --i) { - dst_mac[i] = src_mac & 0xff; - src_mac >>= 8; - } - memset(&dst_mac[ETH_ALEN], 0, 2); + u8 addr[ETH_ALEN]; + + u64_to_ether_addr(src_mac, addr); + eth_hw_addr_set(dev, addr); } -static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, +static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, + const unsigned char *addr, int qpn, u64 *reg_id) { int err; @@ -559,7 +558,7 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, - unsigned char *mac, int *qpn, u64 *reg_id) + const unsigned char *mac, int *qpn, u64 *reg_id) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; @@ -611,7 +610,8 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, } static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, - unsigned char *mac, int qpn, u64 reg_id) + const unsigned char *mac, + int qpn, u64 reg_id) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; @@ -644,7 +644,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv) int index = 0; int err = 0; int *qpn = &priv->base_qpn; - u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); + u64 mac = ether_addr_to_u64(priv->dev->dev_addr); en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", priv->dev->dev_addr); @@ -683,7 +683,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv) int qpn = priv->base_qpn; if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { - u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); + u64 mac = ether_addr_to_u64(priv->dev->dev_addr); en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", priv->dev->dev_addr); mlx4_unregister_mac(dev, priv->port, mac); @@ -701,14 +701,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; int err = 0; - u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); + u64 new_mac_u64 = ether_addr_to_u64(new_mac); if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { struct hlist_head *bucket; unsigned int mac_hash; struct mlx4_mac_entry *entry; struct hlist_node *tmp; - u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); + u64 prev_mac_u64 = ether_addr_to_u64(prev_mac); bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { @@ -797,7 +797,7 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr) if (err) goto out; - memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, saddr->sa_data); mlx4_en_update_user_mac(priv, new_mac); out: mutex_unlock(&mdev->state_lock); @@ -1076,7 +1076,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, mlx4_en_cache_mclist(dev); netif_addr_unlock_bh(dev); list_for_each_entry(mclist, &priv->mc_list, list) { - mcast_addr = mlx4_mac_to_u64(mclist->addr); + mcast_addr = ether_addr_to_u64(mclist->addr); mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, mcast_addr, 0, MLX4_MCAST_CONFIG); } @@ -1169,7 +1169,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, found = true; if (!found) { - mac = mlx4_mac_to_u64(entry->mac); + mac = ether_addr_to_u64(entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, priv->base_qpn, entry->reg_id); 
@@ -1212,7 +1212,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; break; } - mac = mlx4_mac_to_u64(ha->addr); + mac = ether_addr_to_u64(ha->addr); memcpy(entry->mac, ha->addr, ETH_ALEN); err = mlx4_register_mac(mdev->dev, priv->port, mac); if (err < 0) { @@ -1348,7 +1348,7 @@ static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv) for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { bucket = &priv->mac_hash[i]; hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { - mac = mlx4_mac_to_u64(entry->mac); + mac = ether_addr_to_u64(entry->mac); en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n", entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, @@ -3267,7 +3267,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, /* Set default MAC */ dev->addr_len = ETH_ALEN; - mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); + mlx4_en_u64_to_mac(dev, mdev->dev->caps.def_mac[priv->port]); if (!is_valid_ether_addr(dev->dev_addr)) { en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", priv->port, dev->dev_addr); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 0158b88bea5b..532997eba698 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -244,6 +244,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.rx_chksum_complete = 0; priv->port_stats.rx_alloc_pages = 0; priv->xdp_stats.rx_xdp_drop = 0; + priv->xdp_stats.rx_xdp_redirect = 0; + priv->xdp_stats.rx_xdp_redirect_fail = 0; priv->xdp_stats.rx_xdp_tx = 0; priv->xdp_stats.rx_xdp_tx_full = 0; for (i = 0; i < priv->rx_ring_num; i++) { @@ -255,6 +257,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete); priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages); priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop); + priv->xdp_stats.rx_xdp_redirect += READ_ONCE(ring->xdp_redirect); + priv->xdp_stats.rx_xdp_redirect_fail += READ_ONCE(ring->xdp_redirect_fail); priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx); priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 7f6d3b82c29b..650e6a1844ae 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -669,6 +669,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud struct bpf_prog *xdp_prog; int cq_ring = cq->ring; bool doorbell_pending; + bool xdp_redir_flush; struct mlx4_cqe *cqe; struct xdp_buff xdp; int polled = 0; @@ -682,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud xdp_prog = rcu_dereference_bh(ring->xdp_prog); xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq); doorbell_pending = false; + xdp_redir_flush = false; /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE index instead of @@ -790,6 +792,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud switch (act) { case XDP_PASS: break; + case XDP_REDIRECT: + if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) { + ring->xdp_redirect++; + xdp_redir_flush = true; + frags[0].page = NULL; + goto next; + } + ring->xdp_redirect_fail++; 
+ trace_xdp_exception(dev, xdp_prog, act); + goto xdp_drop_no_cnt; case XDP_TX: if (likely(!mlx4_en_xmit_frame(ring, frags, priv, length, cq_ring, @@ -897,6 +909,9 @@ next: break; } + if (xdp_redir_flush) + xdp_do_flush(); + if (likely(polled)) { if (doorbell_pending) { priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index c56b9dba4c71..817f4154b86d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -130,6 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->bf_enabled = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); } + ring->doorbell_address = ring->bf.uar->map + MLX4_SEND_DOORBELL; ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; ring->queue_index = queue_index; @@ -753,8 +754,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring) #else iowrite32be( #endif - (__force u32)ring->doorbell_qpn, - ring->bf.uar->map + MLX4_SEND_DOORBELL); + (__force u32)ring->doorbell_qpn, ring->doorbell_address); } static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring, diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index dc4ac1a2b6b6..42c96c9d7fb1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -3105,7 +3105,7 @@ void mlx4_replace_zero_macs(struct mlx4_dev *dev) dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) { eth_random_addr(mac_addr); dev->port_random_macs |= 1 << i; - dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr); + dev->caps.def_mac[i] = ether_addr_to_u64(mac_addr); } } EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 5a6b0fcaf7f8..b187c210d4d6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4015,9 +4015,6 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&dev->persist->interface_state_mutex); mutex_init(&dev->persist->pci_status_mutex); - ret = devlink_register(devlink); - if (ret) - goto err_persist_free; ret = devlink_params_register(devlink, mlx4_devlink_params, ARRAY_SIZE(mlx4_devlink_params)); if (ret) @@ -4027,17 +4024,15 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_params_unregister; - devlink_params_publish(devlink); - devlink_reload_enable(devlink); pci_save_state(pdev); + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); return 0; err_params_unregister: devlink_params_unregister(devlink, mlx4_devlink_params, ARRAY_SIZE(mlx4_devlink_params)); err_devlink_unregister: - devlink_unregister(devlink); -err_persist_free: kfree(dev->persist); err_devlink_free: devlink_free(devlink); @@ -4140,7 +4135,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct devlink *devlink = priv_to_devlink(priv); int active_vfs = 0; - devlink_reload_disable(devlink); + devlink_unregister(devlink); if (mlx4_is_slave(dev)) persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; @@ -4176,7 +4171,6 @@ static void mlx4_remove_one(struct pci_dev *pdev) mlx4_pci_disable_device(dev); devlink_params_unregister(devlink, mlx4_devlink_params, ARRAY_SIZE(mlx4_devlink_params)); - devlink_unregister(devlink); kfree(dev->persist); devlink_free(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c 
index f1b4ad9c66d2..f1716a83a4d3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1046,7 +1046,7 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) } EXPORT_SYMBOL_GPL(mlx4_flow_detach); -int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, +int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr, int port, int qpn, u16 prio, u64 *reg_id) { int err; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 6bf558c5ec10..e132ff4c82f2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -283,6 +283,7 @@ struct mlx4_en_tx_ring { struct mlx4_bf bf; /* Following part should be mostly read */ + void __iomem *doorbell_address; __be32 doorbell_qpn; __be32 mr_key; u32 size; /* number of TXBBs */ @@ -340,6 +341,8 @@ struct mlx4_en_rx_ring { unsigned long csum_complete; unsigned long rx_alloc_pages; unsigned long xdp_drop; + unsigned long xdp_redirect; + unsigned long xdp_redirect_fail; unsigned long xdp_tx; unsigned long xdp_tx_full; unsigned long dropped; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h index 7b51ae8cf759..e9cd4bb6f83d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -42,9 +42,11 @@ struct mlx4_en_port_stats { struct mlx4_en_xdp_stats { unsigned long rx_xdp_drop; + unsigned long rx_xdp_redirect; + unsigned long rx_xdp_redirect_fail; unsigned long rx_xdp_tx; unsigned long rx_xdp_tx_full; -#define NUM_XDP_STATS 3 +#define NUM_XDP_STATS 5 }; struct mlx4_en_phy_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index db5dfff585c9..4dc3a822907a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -2058,7 +2058,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) return -EINVAL; } - cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL); + cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL); if (!cmd->stats) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index dcf9f27ba2ef..a85341a41cd0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -136,6 +136,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); + struct pci_dev *pdev = dev->pdev; bool sf_dev_allocated; sf_dev_allocated = mlx5_sf_dev_allocated(dev); @@ -153,6 +154,10 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, return -EOPNOTSUPP; } + if (pci_num_vf(pdev)) { + NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable"); + } + switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: mlx5_unload_one(dev); @@ -625,7 +630,6 @@ static int mlx5_devlink_eth_param_register(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH, value); - devlink_param_publish(devlink, &enable_eth_param); return 0; } @@ -636,7 +640,6 @@ static void mlx5_devlink_eth_param_unregister(struct devlink *devlink) if (!mlx5_eth_supported(dev)) return; - devlink_param_unpublish(devlink, &enable_eth_param); 
devlink_param_unregister(devlink, &enable_eth_param); } @@ -672,7 +675,6 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, value); - devlink_param_publish(devlink, &enable_rdma_param); return 0; } @@ -681,7 +683,6 @@ static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink) if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return; - devlink_param_unpublish(devlink, &enable_rdma_param); devlink_param_unregister(devlink, &enable_rdma_param); } @@ -706,7 +707,6 @@ static int mlx5_devlink_vnet_param_register(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET, value); - devlink_param_publish(devlink, &enable_rdma_param); return 0; } @@ -717,7 +717,6 @@ static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink) if (!mlx5_vnet_supported(dev)) return; - devlink_param_unpublish(devlink, &enable_vnet_param); devlink_param_unregister(devlink, &enable_vnet_param); } @@ -797,18 +796,15 @@ static void mlx5_devlink_traps_unregister(struct devlink *devlink) int mlx5_devlink_register(struct devlink *devlink) { + struct mlx5_core_dev *dev = devlink_priv(devlink); int err; - err = devlink_register(devlink); - if (err) - return err; - err = devlink_params_register(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); if (err) - goto params_reg_err; + return err; + mlx5_devlink_set_params_init_values(devlink); - devlink_params_publish(devlink); err = mlx5_devlink_auxdev_params_register(devlink); if (err) @@ -818,6 +814,9 @@ int mlx5_devlink_register(struct devlink *devlink) if (err) goto traps_reg_err; + if (!mlx5_core_is_mp_slave(dev)) + devlink_set_features(devlink, DEVLINK_F_RELOAD); + return 0; traps_reg_err: @@ -825,8 +824,6 @@ traps_reg_err: auxdev_reg_err: devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); -params_reg_err: - devlink_unregister(devlink); return err; } @@ -834,8 +831,6 @@ void mlx5_devlink_unregister(struct devlink *devlink) { mlx5_devlink_traps_unregister(devlink); mlx5_devlink_auxdev_params_unregister(devlink); - devlink_params_unpublish(devlink); devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); - devlink_unregister(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index f9cf9fb31547..da1bec04efff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -1069,7 +1069,6 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) mlx5_fw_tracer_clean_saved_traces_array(tracer); mlx5_fw_tracer_free_strings_db(tracer); mlx5_fw_tracer_destroy_log_buf(tracer); - flush_workqueue(tracer->work_queue); destroy_workqueue(tracer->work_queue); kvfree(tracer); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 03a7a4ce5cd5..a3a4fece0cac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -220,8 +220,6 @@ struct mlx5e_umr_wqe { struct mlx5_mtt inline_mtts[0]; }; -extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; - enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER, MLX5E_PFLAG_TX_CQE_BASED_MODER, @@ -253,6 +251,9 @@ struct mlx5e_params { u16 mode; u8 num_tc; struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; + struct { + struct mlx5e_mqprio_rl *rl; 
+ } channel; } mqprio; bool rx_cqe_compress_def; bool tunneled_offload_en; @@ -879,6 +880,7 @@ struct mlx5e_priv { #endif struct mlx5e_scratchpad scratchpad; struct mlx5e_htb htb; + struct mlx5e_mqprio_rl *mqprio_rl; }; struct mlx5e_rx_handlers { @@ -918,6 +920,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) void mlx5e_init_l2_addr(struct mlx5e_priv *priv); int mlx5e_self_test_num(struct mlx5e_priv *priv); +int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data); void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf); void mlx5e_set_rx_mode_work(struct work_struct *work); @@ -1003,7 +1006,8 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, struct mlx5e_modify_sq_param *p); int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid); + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, + struct mlx5e_sq_stats *sq_stats); void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_free_txqsq(struct mlx5e_txqsq *sq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index e8a8d78e3e4d..50977f01a050 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -7,6 +7,21 @@ #define BYTES_IN_MBIT 125000 +int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes) +{ + if (nbytes < BYTES_IN_MBIT) { + qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n", + nbytes, BYTES_IN_MBIT); + return -EINVAL; + } + return 0; +} + +static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes) +{ + return div_u64(nbytes, BYTES_IN_MBIT); +} + int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev) { return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev)); @@ -238,7 +253,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs if (err) goto err_free_sq; err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params, - &param_sq, sq, 0, node->hw_id, node->qid); + &param_sq, sq, 0, node->hw_id, + priv->htb.qos_sq_stats[node->qid]); if (err) goto err_close_cq; @@ -979,3 +995,87 @@ int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ce return err; } + +struct mlx5e_mqprio_rl { + struct mlx5_core_dev *mdev; + u32 root_id; + u32 *leaves_id; + u8 num_tc; +}; + +struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void) +{ + return kvzalloc(sizeof(struct mlx5e_mqprio_rl), GFP_KERNEL); +} + +void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl) +{ + kvfree(rl); +} + +int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc, + u64 max_rate[]) +{ + int err; + int tc; + + if (!mlx5_qos_is_supported(mdev)) { + qos_warn(mdev, "Missing QoS capabilities. 
Try disabling SRIOV or use a supported device."); + return -EOPNOTSUPP; + } + if (num_tc > mlx5e_qos_max_leaf_nodes(mdev)) + return -EINVAL; + + rl->mdev = mdev; + rl->num_tc = num_tc; + rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL); + if (!rl->leaves_id) + return -ENOMEM; + + err = mlx5_qos_create_root_node(mdev, &rl->root_id); + if (err) + goto err_free_leaves; + + qos_dbg(mdev, "Root created, id %#x\n", rl->root_id); + + for (tc = 0; tc < num_tc; tc++) { + u32 max_average_bw; + + max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]); + err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw, + &rl->leaves_id[tc]); + if (err) + goto err_destroy_leaves; + + qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n", + tc, rl->leaves_id[tc], max_average_bw); + } + return 0; + +err_destroy_leaves: + while (--tc >= 0) + mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]); + mlx5_qos_destroy_node(mdev, rl->root_id); +err_free_leaves: + kvfree(rl->leaves_id); + return err; +} + +void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl) +{ + int tc; + + for (tc = 0; tc < rl->num_tc; tc++) + mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[tc]); + mlx5_qos_destroy_node(rl->mdev, rl->root_id); + kvfree(rl->leaves_id); +} + +int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id) +{ + if (tc >= rl->num_tc) + return -EINVAL; + + *hw_id = rl->leaves_id[tc]; + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h index 757682b7c0e0..b7558907ba20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h @@ -12,6 +12,7 @@ struct mlx5e_priv; struct mlx5e_channels; struct mlx5e_channel; +int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes); int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev); int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv); @@ -41,4 +42,12 @@ int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force, int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil, struct netlink_ext_ack *extack); +/* MQPRIO TX rate limit */ +struct mlx5e_mqprio_rl; +struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void); +void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl); +int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc, + u64 max_rate[]); +void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl); +int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index de03684528bb..398c6761eeb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -3,6 +3,7 @@ #include <net/dst_metadata.h> #include <linux/netdevice.h> +#include <linux/if_macvlan.h> #include <linux/list.h> #include <linux/rculist.h> #include <linux/rtnetlink.h> @@ -409,6 +410,13 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv) static LIST_HEAD(mlx5e_block_cb_list); +static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PASSTHRU; +} + static int mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, struct mlx5e_rep_priv *rpriv, @@ -422,8 +430,14 @@ 
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, struct flow_block_cb *block_cb; if (!mlx5e_tc_tun_device_to_offload(priv, netdev) && - !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)) - return -EOPNOTSUPP; + !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)) { + if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev)) + return -EOPNOTSUPP; + if (!mlx5e_rep_macvlan_mode_supported(netdev)) { + netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode"); + return -EOPNOTSUPP; + } + } if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; @@ -647,9 +661,7 @@ static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *sk "Failed to restore tunnel info for sampled packet\n"); return; } -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) mlx5e_tc_sample_skb(skb, mapped_obj); -#endif /* CONFIG_MLX5_TC_SAMPLE */ mlx5_rep_tc_post_napi_receive(tc_priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c index 625cd49ef96c..b8b481b335cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -391,7 +391,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, return 0; } -static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) +static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) { int err; @@ -399,6 +399,7 @@ static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_r if (err) mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n", mlx5e_rqt_get_rqtn(&rss->rqt), err); + return err; } void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) @@ -490,6 +491,14 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, { bool changed_indir = false; bool changed_hash = false; + struct mlx5e_rss *old_rss; + int err = 0; + + old_rss = mlx5e_rss_alloc(); + if (!old_rss) + return -ENOMEM; + + *old_rss = *rss; if (hfunc && *hfunc != rss->hash.hfunc) { switch (*hfunc) { @@ -497,7 +506,8 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, case ETH_RSS_HASH_TOP: break; default: - return -EINVAL; + err = -EINVAL; + goto out; } changed_hash = true; changed_indir = true; @@ -520,13 +530,20 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, rss->indir.table[i] = indir[i]; } - if (changed_indir && rss->enabled) - mlx5e_rss_apply(rss, rqns, num_rqns); + if (changed_indir && rss->enabled) { + err = mlx5e_rss_apply(rss, rqns, num_rqns); + if (err) { + *rss = *old_rss; + goto out; + } + } if (changed_hash) mlx5e_rss_update_tirs(rss); - return 0; +out: + mlx5e_rss_free(old_rss); + return err; } struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c index 6552ecee3f9b..d1d7e4b9f7ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c @@ -602,7 +602,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, } sample_flow->pre_attr = pre_attr; - return sample_flow->post_rule; + return sample_flow->pre_rule; err_pre_offload_rule: kfree(pre_attr); @@ -613,7 +613,7 @@ err_sample_restore: err_obj_id: sampler_put(tc_psample, sample_flow->sampler); err_sampler: - if 
(!post_act_handle) + if (sample_flow->post_rule) del_post_rule(esw, sample_flow, attr); err_post_rule: if (post_act_handle) @@ -628,9 +628,7 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample, struct mlx5_flow_handle *rule, struct mlx5_flow_attr *attr) { - struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; struct mlx5e_sample_flow *sample_flow; - struct mlx5_vport_tbl_attr tbl_attr; struct mlx5_eswitch *esw; if (IS_ERR_OR_NULL(tc_psample)) @@ -650,23 +648,14 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample, */ sample_flow = attr->sample_attr->sample_flow; mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr); - if (!sample_flow->post_act_handle) - mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule, - sample_flow->post_attr); sample_restore_put(tc_psample, sample_flow->restore); mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id); sampler_put(tc_psample, sample_flow->sampler); - if (sample_flow->post_act_handle) { + if (sample_flow->post_act_handle) mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle); - } else { - tbl_attr.chain = attr->chain; - tbl_attr.prio = attr->prio; - tbl_attr.vport = esw_attr->in_rep->vport; - tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; - mlx5_esw_vporttbl_put(esw, &tbl_attr); - kfree(sample_flow->post_attr); - } + else + del_post_rule(esw, sample_flow, attr); kfree(sample_flow->pre_attr); kfree(sample_flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h index db0146df9b30..9ef8a49d7801 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h @@ -19,6 +19,8 @@ struct mlx5e_sample_attr { struct mlx5e_sample_flow *sample_flow; }; +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj); struct mlx5_flow_handle * @@ -38,4 +40,29 @@ mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act); void mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample); +#else /* CONFIG_MLX5_TC_SAMPLE */ + +static inline struct mlx5_flow_handle * +mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr, + u32 tunnel_id) +{ return ERR_PTR(-EOPNOTSUPP); } + +static inline void +mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample, + struct mlx5_flow_handle *rule, + struct mlx5_flow_attr *attr) {} + +static inline struct mlx5e_tc_psample * +mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act) +{ return ERR_PTR(-EOPNOTSUPP); } + +static inline void +mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) {} + +static inline void +mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) {} + +#endif /* CONFIG_MLX5_TC_SAMPLE */ #endif /* __MLX5_EN_TC_SAMPLE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 6c949abcd2e1..225748a9e52a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -2127,12 +2127,20 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, ct_priv->post_act = post_act; mutex_init(&ct_priv->control_lock); - rhashtable_init(&ct_priv->zone_ht, &zone_params); - rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params); - 
rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params); + if (rhashtable_init(&ct_priv->zone_ht, &zone_params)) + goto err_ct_zone_ht; + if (rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params)) + goto err_ct_tuples_ht; + if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params)) + goto err_ct_tuples_nat_ht; return ct_priv; +err_ct_tuples_nat_ht: + rhashtable_destroy(&ct_priv->ct_tuples_ht); +err_ct_tuples_ht: + rhashtable_destroy(&ct_priv->zone_ht); +err_ct_zone_ht: err_ct_nat_tbl: mlx5_chains_destroy_global_table(chains, ct_priv->ct); err_ct_tbl: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index b4e986818794..cc7d7b895b80 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -118,6 +118,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv, uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); attr->fl.fl4.flowi4_oif = uplink_dev->ifindex; + } else { + struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev); + + if (tunnel && tunnel->get_remote_ifindex) + attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev); } rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4); @@ -435,12 +440,15 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct mlx5e_tc_tun_route_attr *attr) { + struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev); struct net_device *route_dev; struct net_device *out_dev; struct dst_entry *dst; struct neighbour *n; int ret; + if (tunnel && tunnel->get_remote_ifindex) + attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev); dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6, NULL); if (IS_ERR(dst)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 9350ca05ce65..aa092eaeaec3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -51,6 +51,7 @@ struct mlx5e_tc_tunnel { void *headers_v); bool (*encap_info_equal)(struct mlx5e_encap_key *a, struct mlx5e_encap_key *b); + int (*get_remote_ifindex)(struct net_device *mirred_dev); }; extern struct mlx5e_tc_tunnel vxlan_tunnel; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index 4267f3a1059e..fd07c4cbfd1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -141,6 +141,14 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, return 0; } +static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev) +{ + const struct vxlan_dev *vxlan = netdev_priv(mirred_dev); + const struct vxlan_rdst *dst = &vxlan->default_dst; + + return dst->remote_ifindex; +} + struct mlx5e_tc_tunnel vxlan_tunnel = { .tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN, .match_level = MLX5_MATCH_L4, @@ -151,4 +159,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = { .parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan, .parse_tunnel = mlx5e_tc_tun_parse_vxlan, .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic, + .get_remote_ifindex = mlx5e_tc_tun_get_remote_ifindex, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 9d451b8ee467..7a97e0e21fd7 100644 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -267,9 +267,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data) break; case ETH_SS_TEST: - for (i = 0; i < mlx5e_self_test_num(priv); i++) - strcpy(data + i * ETH_GSTRING_LEN, - mlx5e_self_tests[i]); + mlx5e_self_test_fill_strings(priv, data); break; case ETH_SS_STATS: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index c06b4b938ae7..73a377c8a2da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -71,12 +71,12 @@ struct mlx5e_l2_hash_node { bool mpfs; }; -static inline int mlx5e_hash_l2(u8 *addr) +static inline int mlx5e_hash_l2(const u8 *addr) { return addr[5]; } -static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr) +static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr) { struct mlx5e_l2_hash_node *hn; int ix = mlx5e_hash_l2(addr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 09c8b71b186c..6d8fde1314e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -930,9 +930,10 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa) struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS; + size_t size; - xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq, - GFP_KERNEL, numa); + size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq); + xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa); if (!xdpi_fifo->xi) return -ENOMEM; @@ -946,10 +947,11 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa) static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + size_t size; int err; - sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz, - GFP_KERNEL, numa); + size = array_size(sizeof(*sq->db.wqe_info), wq_sz); + sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa); if (!sq->db.wqe_info) return -ENOMEM; @@ -1298,7 +1300,8 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev, int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid) + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, + struct mlx5e_sq_stats *sq_stats) { struct mlx5e_create_sq_param csp = {}; u32 tx_rate; @@ -1308,10 +1311,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, if (err) return err; - if (qos_queue_group_id) - sq->stats = c->priv->htb.qos_sq_stats[qos_qid]; - else - sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; + sq->stats = sq_stats; csp.tisn = tisn; csp.tis_lst_sz = 1; @@ -1705,6 +1705,36 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) mlx5e_close_cq(&c->sq[tc].cq); } +static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq) +{ + int tc; + + for (tc = 0; tc < TC_MAX_QUEUE; tc++) + if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count) + return tc; + + WARN(1, "Unexpected TCs configuration. 
No match found for txq %u", txq); + return -ENOENT; +} + +static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix, + u32 *hw_id) +{ + int tc; + + if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL || + !params->mqprio.channel.rl) { + *hw_id = 0; + return 0; + } + + tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix); + if (tc < 0) + return tc; + + return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id); +} + static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) @@ -1713,9 +1743,16 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) { int txq_ix = c->ix + tc * params->num_channels; + u32 qos_queue_group_id; + + err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id); + if (err) + goto err_close_sqs; err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, - params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0); + params, &cparam->txq_sq, &c->sq[tc], tc, + qos_queue_group_id, + &c->priv->channel_stats[c->ix].sq[tc]); if (err) goto err_close_sqs; } @@ -2340,6 +2377,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err); goto err_txqs; } + if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) { + if (priv->mqprio_rl) { + mlx5e_mqprio_rl_cleanup(priv->mqprio_rl); + mlx5e_mqprio_rl_free(priv->mqprio_rl); + } + priv->mqprio_rl = priv->channels.params.mqprio.channel.rl; + } return 0; @@ -2901,15 +2945,18 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc) { params->mqprio.mode = TC_MQPRIO_MODE_DCB; params->mqprio.num_tc = num_tc; + params->mqprio.channel.rl = NULL; mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc, params->num_channels); } static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params, - struct tc_mqprio_qopt *qopt) + struct tc_mqprio_qopt *qopt, + struct mlx5e_mqprio_rl *rl) { params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL; params->mqprio.num_tc = qopt->num_tc; + params->mqprio.channel.rl = rl; mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt); } @@ -2969,9 +3016,13 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv, netdev_err(netdev, "Min tx rate is not supported\n"); return -EINVAL; } + if (mqprio->max_rate[i]) { - netdev_err(netdev, "Max tx rate is not supported\n"); - return -EINVAL; + int err; + + err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]); + if (err) + return err; } if (mqprio->qopt.offset[i] != agg_count) { @@ -2990,11 +3041,22 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv, return 0; } +static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio) +{ + int tc; + + for (tc = 0; tc < mqprio->qopt.num_tc; tc++) + if (mqprio->max_rate[tc]) + return true; + return false; +} + static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, struct tc_mqprio_qopt_offload *mqprio) { mlx5e_fp_preactivate preactivate; struct mlx5e_params new_params; + struct mlx5e_mqprio_rl *rl; bool nch_changed; int err; @@ -3002,13 +3064,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, if (err) return err; + rl = NULL; + if (mlx5e_mqprio_rate_limit(mqprio)) { + rl = mlx5e_mqprio_rl_alloc(); + if (!rl) + return -ENOMEM; + err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc, + mqprio->max_rate); + if (err) { + mlx5e_mqprio_rl_free(rl); + return 
err; + } + } + new_params = priv->channels.params; - mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt); + mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl); nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1; preactivate = nch_changed ? mlx5e_num_channels_changed_ctx : mlx5e_update_netdev_queues_ctx; - return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); + err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); + if (err && rl) { + mlx5e_mqprio_rl_cleanup(rl); + mlx5e_mqprio_rl_free(rl); + } + + return err; } static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, @@ -3226,7 +3307,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr) return -EADDRNOTAVAIL; netif_addr_lock_bh(netdev); - ether_addr_copy(netdev->dev_addr, saddr->sa_data); + eth_hw_addr_set(netdev, saddr->sa_data); netif_addr_unlock_bh(netdev); mlx5e_nic_set_rx_mode(priv); @@ -4855,6 +4936,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) kfree(priv->htb.qos_sq_stats[i]); kvfree(priv->htb.qos_sq_stats); + if (priv->mqprio_rl) { + mlx5e_mqprio_rl_cleanup(priv->mqprio_rl); + mlx5e_mqprio_rl_free(priv->mqprio_rl); + } + memset(priv, 0, sizeof(*priv)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index ce8ab1f01876..8c9163d2c646 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -35,30 +35,7 @@ #include <net/udp.h> #include "en.h" #include "en/port.h" - -enum { - MLX5E_ST_LINK_STATE, - MLX5E_ST_LINK_SPEED, - MLX5E_ST_HEALTH_INFO, -#ifdef CONFIG_INET - MLX5E_ST_LOOPBACK, -#endif - MLX5E_ST_NUM, -}; - -const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = { - "Link Test", - "Speed Test", - "Health Test", -#ifdef CONFIG_INET - "Loopback Test", -#endif -}; - -int mlx5e_self_test_num(struct mlx5e_priv *priv) -{ - return ARRAY_SIZE(mlx5e_self_tests); -} +#include "eswitch.h" static int mlx5e_test_health_info(struct mlx5e_priv *priv) { @@ -265,6 +242,14 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, mlx5e_refresh_tirs(priv, false, false); } +static int mlx5e_cond_loopback(struct mlx5e_priv *priv) +{ + if (is_mdev_switchdev_mode(priv->mdev)) + return -EOPNOTSUPP; + + return 0; +} + #define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200)) static int mlx5e_test_loopback(struct mlx5e_priv *priv) { @@ -313,37 +298,47 @@ out: } #endif -static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = { - mlx5e_test_link_state, - mlx5e_test_link_speed, - mlx5e_test_health_info, +typedef int (*mlx5e_st_func)(struct mlx5e_priv *); + +struct mlx5e_st { + char name[ETH_GSTRING_LEN]; + mlx5e_st_func st_func; + mlx5e_st_func cond_func; +}; + +static struct mlx5e_st mlx5e_sts[] = { + { "Link Test", mlx5e_test_link_state }, + { "Speed Test", mlx5e_test_link_speed }, + { "Health Test", mlx5e_test_health_info }, #ifdef CONFIG_INET - mlx5e_test_loopback, + { "Loopback Test", mlx5e_test_loopback, mlx5e_cond_loopback }, #endif }; +#define MLX5E_ST_NUM ARRAY_SIZE(mlx5e_sts) + void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf) { struct mlx5e_priv *priv = netdev_priv(ndev); - int i; - - memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM); + int i, count = 0; mutex_lock(&priv->state_lock); netdev_info(ndev, "Self test begin..\n"); for (i = 0; i < MLX5E_ST_NUM; i++) { - netdev_info(ndev, "\t[%d] %s start..\n", - i, mlx5e_self_tests[i]); - buf[i] = 
mlx5e_st_func[i](priv); - netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", - i, mlx5e_self_tests[i], buf[i]); + struct mlx5e_st st = mlx5e_sts[i]; + + if (st.cond_func && st.cond_func(priv)) + continue; + netdev_info(ndev, "\t[%d] %s start..\n", i, st.name); + buf[count] = st.st_func(priv); + netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]); } mutex_unlock(&priv->state_lock); - for (i = 0; i < MLX5E_ST_NUM; i++) { + for (i = 0; i < count; i++) { if (buf[i]) { etest->flags |= ETH_TEST_FL_FAILED; break; @@ -352,3 +347,24 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, netdev_info(ndev, "Self test out: status flags(0x%x)\n", etest->flags); } + +int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data) +{ + int i, count = 0; + + for (i = 0; i < MLX5E_ST_NUM; i++) { + struct mlx5e_st st = mlx5e_sts[i]; + + if (st.cond_func && st.cond_func(priv)) + continue; + if (data) + strcpy(data + count * ETH_GSTRING_LEN, st.name); + count++; + } + return count; +} + +int mlx5e_self_test_num(struct mlx5e_priv *priv) +{ + return mlx5e_self_test_fill_strings(priv, NULL); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ba8164792016..d92ee2f37c22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -39,6 +39,7 @@ #include <linux/rhashtable.h> #include <linux/refcount.h> #include <linux/completion.h> +#include <linux/if_macvlan.h> #include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_csum.h> #include <net/psample.h> @@ -246,7 +247,6 @@ get_ct_priv(struct mlx5e_priv *priv) return priv->fs.tc.ct; } -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) static struct mlx5e_tc_psample * get_sample_priv(struct mlx5e_priv *priv) { @@ -263,7 +263,6 @@ get_sample_priv(struct mlx5e_priv *priv) return NULL; } -#endif struct mlx5_flow_handle * mlx5_tc_rule_insert(struct mlx5e_priv *priv, @@ -1146,11 +1145,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv), flow, spec, attr, mod_hdr_acts); -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) } else if (flow_flag_test(flow, SAMPLE)) { rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr, mlx5e_tc_get_flow_tun_id(flow)); -#endif } else { rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); } @@ -1186,12 +1183,10 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, return; } -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) if (flow_flag_test(flow, SAMPLE)) { mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); return; } -#endif if (attr->esw_attr->split_count) mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); @@ -1688,8 +1683,8 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv, if (opt->opt_class != htons(U16_MAX) || opt->type != U8_MAX) { - NL_SET_ERR_MSG(extack, - "Partial match of tunnel options in chain > 0 isn't supported"); + NL_SET_ERR_MSG_MOD(extack, + "Partial match of tunnel options in chain > 0 isn't supported"); netdev_warn(priv->netdev, "Partial match of tunnel options in chain > 0 isn't supported"); return -EOPNOTSUPP; @@ -1905,8 +1900,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, if ((needs_mapping || sets_mapping) && !mlx5_eswitch_reg_c1_loopback_enabled(esw)) { - NL_SET_ERR_MSG(extack, - "Chains on tunnel devices isn't supported without register loopback support"); + NL_SET_ERR_MSG_MOD(extack, + "Chains on tunnel devices isn't supported without 
register loopback support"); netdev_warn(priv->netdev, "Chains on tunnel devices isn't supported without register loopback support"); return -EOPNOTSUPP; @@ -2910,8 +2905,7 @@ out_err: } static int -parse_pedit_to_reformat(struct mlx5e_priv *priv, - const struct flow_action_entry *act, +parse_pedit_to_reformat(const struct flow_action_entry *act, struct mlx5e_tc_flow_parse_attr *parse_attr, struct netlink_ext_ack *extack) { @@ -2943,7 +2937,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, struct netlink_ext_ack *extack) { if (flow && flow_flag_test(flow, L3_TO_L2_DECAP)) - return parse_pedit_to_reformat(priv, act, parse_attr, extack); + return parse_pedit_to_reformat(act, parse_attr, extack); return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack); @@ -3025,10 +3019,10 @@ struct ipv6_hoplimit_word { __u8 hop_limit; }; -static int is_action_keys_supported(const struct flow_action_entry *act, - bool ct_flow, bool *modify_ip_header, - bool *modify_tuple, - struct netlink_ext_ack *extack) +static bool +is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow, + bool *modify_ip_header, bool *modify_tuple, + struct netlink_ext_ack *extack) { u32 mask, offset; u8 htype; @@ -3056,7 +3050,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act, if (ct_flow && *modify_tuple) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of ipv4 address with action ct"); - return -EOPNOTSUPP; + return false; } } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { struct ipv6_hoplimit_word *hoplimit_word = @@ -3074,7 +3068,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act, if (ct_flow && *modify_tuple) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of ipv6 address with action ct"); - return -EOPNOTSUPP; + return false; } } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP || htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) { @@ -3082,11 +3076,11 @@ static int is_action_keys_supported(const struct flow_action_entry *act, if (ct_flow) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of transport header ports with action ct"); - return -EOPNOTSUPP; + return false; } } - return 0; + return true; } static bool modify_tuple_supported(bool modify_tuple, bool ct_clear, @@ -3133,7 +3127,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, void *headers_v; u16 ethertype; u8 ip_proto; - int i, err; + int i; headers_c = get_match_headers_criteria(actions, spec); headers_v = get_match_headers_value(actions, spec); @@ -3151,11 +3145,10 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, act->id != FLOW_ACTION_ADD) continue; - err = is_action_keys_supported(act, ct_flow, - &modify_ip_header, - &modify_tuple, extack); - if (err) - return err; + if (!is_action_keys_supported(act, ct_flow, + &modify_ip_header, + &modify_tuple, extack)) + return false; } if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack, @@ -3176,37 +3169,65 @@ out_ok: return true; } -static bool actions_match_supported(struct mlx5e_priv *priv, - struct flow_action *flow_action, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) +static bool +actions_match_supported_fdb(struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { - bool ct_flow = false, ct_clear = false; - u32 actions; + struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; + bool ct_flow, ct_clear; - 
ct_clear = flow->attr->ct_attr.ct_action & - TCA_CT_ACT_CLEAR; + ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; ct_flow = flow_flag_test(flow, CT) && !ct_clear; - actions = flow->attr->action; - if (mlx5e_is_eswitch_flow(flow)) { - if (flow->attr->esw_attr->split_count && ct_flow && - !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) { - /* All registers used by ct are cleared when using - * split rules. - */ - NL_SET_ERR_MSG_MOD(extack, - "Can't offload mirroring with action ct"); - return false; - } + if (esw_attr->split_count && ct_flow && + !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) { + /* All registers used by ct are cleared when using + * split rules. + */ + NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct"); + return false; } - if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - return modify_header_match_supported(priv, &parse_attr->spec, - flow_action, actions, - ct_flow, ct_clear, - extack); + if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { + NL_SET_ERR_MSG_MOD(extack, + "current firmware doesn't support split rule for port mirroring"); + netdev_warn_once(priv->netdev, + "current firmware doesn't support split rule for port mirroring\n"); + return false; + } + + return true; +} + +static bool +actions_match_supported(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ + u32 actions = flow->attr->action; + bool ct_flow, ct_clear; + + ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; + ct_flow = flow_flag_test(flow, CT) && !ct_clear; + + if (!(actions & + (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { + NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action"); + return false; + } + + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + !modify_header_match_supported(priv, &parse_attr->spec, flow_action, + actions, ct_flow, ct_clear, extack)) + return false; + + if (mlx5e_is_eswitch_flow(flow) && + !actions_match_supported_fdb(priv, parse_attr, flow, extack)) + return false; return true; } @@ -3355,11 +3376,51 @@ static int validate_goto_chain(struct mlx5e_priv *priv, return 0; } -static int parse_tc_nic_actions(struct mlx5e_priv *priv, - struct flow_action *flow_action, +static int +actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr, + struct pedit_headers_action *hdrs, struct netlink_ext_ack *extack) { + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + enum mlx5_flow_namespace_type ns_type; + int err; + + if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits && + !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) + return 0; + + ns_type = get_flow_name_space(flow); + + err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs, + &attr->action, extack); + if (err) + return err; + + /* In case all pedit actions are skipped, remove the MOD_HDR flag. 
*/ + if (parse_attr->mod_hdr_acts.num_actions > 0) + return 0; + + attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + + if (ns_type != MLX5_FLOW_NAMESPACE_FDB) + return 0; + + if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || + (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) + attr->esw_attr->split_count = 0; + + return 0; +} + +static int +parse_tc_nic_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; struct pedit_headers_action hdrs[2] = {}; @@ -3451,7 +3512,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, if (err) return err; - action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->dest_chain = act->chain_index; break; case FLOW_ACTION_CT: @@ -3467,33 +3529,16 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, } } - if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || - hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { - err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, - parse_attr, hdrs, &action, extack); - if (err) - return err; - /* in case all pedit actions are skipped, remove the MOD_HDR - * flag. - */ - if (parse_attr->mod_hdr_acts.num_actions == 0) { - action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); - } - } - attr->action = action; - if (attr->dest_chain) { - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported"); - return -EOPNOTSUPP; - } - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + if (attr->dest_chain && parse_attr->mirred_ifindex[0]) { + NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported"); + return -EOPNOTSUPP; } - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack); + if (err) + return err; if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; @@ -3765,6 +3810,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, flow_action_for_each(i, act, flow_action) { switch (act->id) { + case FLOW_ACTION_ACCEPT: + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT; + break; case FLOW_ACTION_DROP: action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; @@ -3914,6 +3964,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return err; } + if (netif_is_macvlan(out_dev)) + out_dev = macvlan_dev_real_dev(out_dev); + err = verify_uplink_forwarding(priv, flow, out_dev, extack); if (err) return err; @@ -3998,7 +4051,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (err) return err; - action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->dest_chain = act->chain_index; break; case FLOW_ACTION_CT: @@ -4045,60 +4099,26 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return err; } - if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || - hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { - err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB, - parse_attr, hdrs, &action, extack); - if (err) - return err; - /* in case all pedit actions are skipped, remove the 
MOD_HDR - * flag. we might have set split_count either by pedit or - * pop/push. if there is no pop/push either, reset it too. - */ - if (parse_attr->mod_hdr_acts.num_actions == 0) { - action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); - if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || - (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) - esw_attr->split_count = 0; - } - } - attr->action = action; - if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) - return -EOPNOTSUPP; - - if (attr->dest_chain) { - if (decap) { - /* It can be supported if we'll create a mapping for - * the tunnel device only (without tunnel), and set - * this tunnel id with this decap flow. - * - * On restore (miss), we'll just set this saved tunnel - * device. - */ - - NL_SET_ERR_MSG(extack, - "Decap with goto isn't supported"); - netdev_warn(priv->netdev, - "Decap with goto isn't supported"); - return -EOPNOTSUPP; - } - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - } + err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack); + if (err) + return err; - if (!(attr->action & - (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { - NL_SET_ERR_MSG_MOD(extack, - "Rule must have at least one forward/drop action"); + if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; - } - if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { - NL_SET_ERR_MSG_MOD(extack, - "current firmware doesn't support split rule for port mirroring"); - netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); + if (attr->dest_chain && decap) { + /* It can be supported if we'll create a mapping for + * the tunnel device only (without tunnel), and set + * this tunnel id with this decap flow. + * + * On restore (miss), we'll just set this saved tunnel + * device. + */ + + NL_SET_ERR_MSG(extack, "Decap with goto isn't supported"); + netdev_warn(priv->netdev, "Decap with goto isn't supported"); return -EOPNOTSUPP; } @@ -5006,9 +5026,7 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) MLX5_FLOW_NAMESPACE_FDB, uplink_priv->post_act); -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act); -#endif mapping_id = mlx5_query_nic_system_image_guid(esw->dev); @@ -5022,9 +5040,11 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) } uplink_priv->tunnel_mapping = mapping; - /* 0xFFF is reserved for stack devices slow path table mark */ + /* Two last values are reserved for stack devices slow path table mark + * and bridge ingress push mark. 
+ */ mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS, - sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true); + sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true); if (IS_ERR(mapping)) { err = PTR_ERR(mapping); goto err_enc_opts_mapping; @@ -5052,9 +5072,7 @@ err_ht_init: err_enc_opts_mapping: mapping_destroy(uplink_priv->tunnel_mapping); err_tun_mapping: -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); -#endif mlx5_tc_ct_clean(uplink_priv->ct_priv); netdev_warn(priv->netdev, "Failed to initialize tc (eswitch), err: %d", err); @@ -5074,9 +5092,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); mapping_destroy(uplink_priv->tunnel_mapping); -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); -#endif mlx5_tc_ct_clean(uplink_priv->ct_priv); mlx5e_tc_post_act_destroy(uplink_priv->post_act); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 605c8ecc3610..792e0d6aa861 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -632,6 +632,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev) mlx5_eq_notifier_register(dev, &table->cq_err_nb); param = (struct mlx5_eq_param) { + .irq_index = MLX5_IRQ_EQ_CTRL, .nent = MLX5_NUM_CMD_EQE, .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, }; @@ -644,6 +645,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev) mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); param = (struct mlx5_eq_param) { + .irq_index = MLX5_IRQ_EQ_CTRL, .nent = MLX5_NUM_ASYNC_EQE, }; @@ -653,6 +655,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev) goto err2; param = (struct mlx5_eq_param) { + .irq_index = MLX5_IRQ_EQ_CTRL, .nent = /* TODO: sriov max_vf + */ 1, .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST, }; @@ -806,8 +809,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev) ncomp_eqs = table->num_comp_eqs; nent = MLX5_COMP_EQ_SIZE; for (i = 0; i < ncomp_eqs; i++) { - int vecidx = i + MLX5_IRQ_VEC_COMP_BASE; struct mlx5_eq_param param = {}; + int vecidx = i; eq = kzalloc(sizeof(*eq), GFP_KERNEL); if (!eq) { @@ -953,9 +956,7 @@ static int set_rmap(struct mlx5_core_dev *mdev) goto err_out; } - vecidx = MLX5_IRQ_VEC_COMP_BASE; - for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE; - vecidx++) { + for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) { err = irq_cpu_rmap_add(eq_table->rmap, pci_irq_vector(mdev->pdev, vecidx)); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index 7e221038df8d..ed72246d1d83 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -28,7 +28,10 @@ #define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1) #define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \ (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1) -#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1) +#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2) +#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \ + (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1) +#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1) #define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0 @@ -61,6 +64,9 @@ struct 
mlx5_esw_bridge { struct mlx5_flow_table *egress_ft; struct mlx5_flow_group *egress_vlan_fg; struct mlx5_flow_group *egress_mac_fg; + struct mlx5_flow_group *egress_miss_fg; + struct mlx5_pkt_reformat *egress_miss_pkt_reformat; + struct mlx5_flow_handle *egress_miss_handle; unsigned long ageing_time; u32 flags; }; @@ -86,6 +92,26 @@ mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry) SWITCHDEV_FDB_DEL_TO_BRIDGE); } +static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw) +{ + return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) && + MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) && + MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >= + offsetof(struct vlan_ethhdr, h_vlan_proto); +} + +static struct mlx5_pkt_reformat * +mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw) +{ + struct mlx5_pkt_reformat_params reformat_params = {}; + + reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR; + reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START; + reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto); + reformat_params.size = sizeof(struct vlan_hdr); + return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); +} + static struct mlx5_flow_table * mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw) { @@ -287,43 +313,74 @@ mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_ return fg; } +static struct mlx5_flow_group * +mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *fg; + u32 *in, *match; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return ERR_PTR(-ENOMEM); + + MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2); + match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + + MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK); + + MLX5_SET(create_flow_group_in, in, start_flow_index, + MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM); + MLX5_SET(create_flow_group_in, in, end_flow_index, + MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO); + + fg = mlx5_create_flow_group(egress_ft, in); + if (IS_ERR(fg)) + esw_warn(esw->dev, + "Failed to create bridge egress table miss flow group (err=%ld)\n", + PTR_ERR(fg)); + kvfree(in); + return fg; +} + static int mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads) { struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg; struct mlx5_flow_table *ingress_ft, *skip_ft; + struct mlx5_eswitch *esw = br_offloads->esw; int err; - if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw)) + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) return -EOPNOTSUPP; ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE, MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE, - br_offloads->esw); + esw); if (IS_ERR(ingress_ft)) return PTR_ERR(ingress_ft); skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE, MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE, - br_offloads->esw); + esw); if (IS_ERR(skip_ft)) { err = PTR_ERR(skip_ft); goto err_skip_tbl; } - vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft); + vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft); if (IS_ERR(vlan_fg)) { err = PTR_ERR(vlan_fg); goto err_vlan_fg; } - filter_fg = 
mlx5_esw_bridge_ingress_filter_fg_create(br_offloads->esw, ingress_ft); + filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft); if (IS_ERR(filter_fg)) { err = PTR_ERR(filter_fg); goto err_filter_fg; } - mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft); + mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft); if (IS_ERR(mac_fg)) { err = PTR_ERR(mac_fg); goto err_mac_fg; @@ -362,35 +419,82 @@ mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloa br_offloads->ingress_ft = NULL; } +static struct mlx5_flow_handle * +mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft, + struct mlx5_flow_table *skip_ft, + struct mlx5_pkt_reformat *pkt_reformat); + static int mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads, struct mlx5_esw_bridge *bridge) { - struct mlx5_flow_group *mac_fg, *vlan_fg; + struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg; + struct mlx5_pkt_reformat *miss_pkt_reformat = NULL; + struct mlx5_flow_handle *miss_handle = NULL; + struct mlx5_eswitch *esw = br_offloads->esw; struct mlx5_flow_table *egress_ft; int err; egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE, MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE, - br_offloads->esw); + esw); if (IS_ERR(egress_ft)) return PTR_ERR(egress_ft); - vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft); + vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft); if (IS_ERR(vlan_fg)) { err = PTR_ERR(vlan_fg); goto err_vlan_fg; } - mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft); + mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft); if (IS_ERR(mac_fg)) { err = PTR_ERR(mac_fg); goto err_mac_fg; } + if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) { + miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft); + if (IS_ERR(miss_fg)) { + esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n", + PTR_ERR(miss_fg)); + miss_fg = NULL; + goto skip_miss_flow; + } + + miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw); + if (IS_ERR(miss_pkt_reformat)) { + esw_warn(esw->dev, + "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n", + PTR_ERR(miss_pkt_reformat)); + miss_pkt_reformat = NULL; + mlx5_destroy_flow_group(miss_fg); + miss_fg = NULL; + goto skip_miss_flow; + } + + miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft, + br_offloads->skip_ft, + miss_pkt_reformat); + if (IS_ERR(miss_handle)) { + esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n", + PTR_ERR(miss_handle)); + miss_handle = NULL; + mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat); + miss_pkt_reformat = NULL; + mlx5_destroy_flow_group(miss_fg); + miss_fg = NULL; + goto skip_miss_flow; + } + } +skip_miss_flow: + bridge->egress_ft = egress_ft; bridge->egress_vlan_fg = vlan_fg; bridge->egress_mac_fg = mac_fg; + bridge->egress_miss_fg = miss_fg; + bridge->egress_miss_pkt_reformat = miss_pkt_reformat; + bridge->egress_miss_handle = miss_handle; return 0; err_mac_fg: @@ -403,6 +507,13 @@ err_vlan_fg: static void mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge) { + if (bridge->egress_miss_handle) + mlx5_del_flow_rules(bridge->egress_miss_handle); + if (bridge->egress_miss_pkt_reformat) + mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev, + bridge->egress_miss_pkt_reformat); + if (bridge->egress_miss_fg) + mlx5_destroy_flow_group(bridge->egress_miss_fg); 
mlx5_destroy_flow_group(bridge->egress_mac_fg); mlx5_destroy_flow_group(bridge->egress_vlan_fg); mlx5_destroy_flow_table(bridge->egress_ft); @@ -443,8 +554,10 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num)); if (vlan && vlan->pkt_reformat_push) { - flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; flow_act.pkt_reformat = vlan->pkt_reformat_push; + flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark; } else if (vlan) { MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.cvlan_tag); @@ -599,6 +712,41 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u return handle; } +static struct mlx5_flow_handle * +mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft, + struct mlx5_flow_table *skip_ft, + struct mlx5_pkt_reformat *pkt_reformat) +{ + struct mlx5_flow_destination dest = { + .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE, + .ft = skip_ft, + }; + struct mlx5_flow_act flow_act = { + .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT, + .flags = FLOW_ACT_NO_APPEND, + .pkt_reformat = pkt_reformat, + }; + struct mlx5_flow_spec *rule_spec; + struct mlx5_flow_handle *handle; + + rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL); + if (!rule_spec) + return ERR_PTR(-ENOMEM); + + rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + + MLX5_SET(fte_match_param, rule_spec->match_criteria, + misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK); + MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1, + ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK); + + handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1); + + kvfree(rule_spec); + return handle; +} + static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads) { @@ -798,24 +946,14 @@ mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5 static int mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw) { - struct mlx5_pkt_reformat_params reformat_params = {}; struct mlx5_pkt_reformat *pkt_reformat; - if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) || - MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) < sizeof(struct vlan_hdr) || - MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) < - offsetof(struct vlan_ethhdr, h_vlan_proto)) { + if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) { esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n"); return -EOPNOTSUPP; } - reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR; - reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START; - reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto); - reformat_params.size = sizeof(struct vlan_hdr); - pkt_reformat = mlx5_packet_reformat_alloc(esw->dev, - &reformat_params, - MLX5_FLOW_NAMESPACE_FDB); + pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw); if (IS_ERR(pkt_reformat)) { esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n", PTR_ERR(pkt_reformat)); @@ -833,6 +971,33 @@ mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_ vlan->pkt_reformat_pop = NULL; } +static int +mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch 
*esw) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_modify_hdr *pkt_mod_hdr; + + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); + MLX5_SET(set_action_in, action, offset, 8); + MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS); + MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN); + + pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action); + if (IS_ERR(pkt_mod_hdr)) + return PTR_ERR(pkt_mod_hdr); + + vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr; + return 0; +} + +static void +mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw) +{ + mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark); + vlan->pkt_mod_hdr_push_mark = NULL; +} + static struct mlx5_esw_bridge_vlan * mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port, struct mlx5_eswitch *esw) @@ -852,6 +1017,10 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por err = mlx5_esw_bridge_vlan_push_create(vlan, esw); if (err) goto err_vlan_push; + + err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw); + if (err) + goto err_vlan_push_mark; } if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { err = mlx5_esw_bridge_vlan_pop_create(vlan, esw); @@ -870,6 +1039,9 @@ err_xa_insert: if (vlan->pkt_reformat_pop) mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw); err_vlan_pop: + if (vlan->pkt_mod_hdr_push_mark) + mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw); +err_vlan_push_mark: if (vlan->pkt_reformat_push) mlx5_esw_bridge_vlan_push_cleanup(vlan, esw); err_vlan_push: @@ -886,6 +1058,7 @@ static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port, static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_esw_bridge *bridge) { + struct mlx5_eswitch *esw = bridge->br_offloads->esw; struct mlx5_esw_bridge_fdb_entry *entry, *tmp; list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) { @@ -894,9 +1067,11 @@ static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan, } if (vlan->pkt_reformat_pop) - mlx5_esw_bridge_vlan_pop_cleanup(vlan, bridge->br_offloads->esw); + mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw); + if (vlan->pkt_mod_hdr_push_mark) + mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw); if (vlan->pkt_reformat_push) - mlx5_esw_bridge_vlan_push_cleanup(vlan, bridge->br_offloads->esw); + mlx5_esw_bridge_vlan_push_cleanup(vlan, esw); } static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h index 52964a82d6a6..878311fe950a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h @@ -49,6 +49,7 @@ struct mlx5_esw_bridge_vlan { struct list_head fdb_list; struct mlx5_pkt_reformat *pkt_reformat_push; struct mlx5_pkt_reformat *pkt_reformat_pop; + struct mlx5_modify_hdr *pkt_mod_hdr_push_mark; }; struct mlx5_esw_bridge_port { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 2c7444101bb9..7461aafb321e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -447,8 +447,16 @@ enum { MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2), 
MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3), MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4), + MLX5_ESW_ATTR_FLAG_ACCEPT = BIT(5), }; +/* Returns true if any of the flags that require skipping further TC/NF processing are set. */ +static inline bool +mlx5_esw_attr_flags_skip(u32 attr_flags) +{ + return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT); +} + struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; struct mlx5_core_dev *in_mdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 0d461e38add3..ca7e31a1a431 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -440,7 +440,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest, } else if (attr->dest_ft) { esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i); (*i)++; - } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { + } else if (mlx5_esw_attr_flags_skip(attr->flags)) { esw_setup_slow_path_dest(dest, flow_act, chains, *i); (*i)++; } else if (attr->dest_chain) { @@ -467,7 +467,7 @@ esw_cleanup_dests(struct mlx5_eswitch *esw, if (attr->dest_ft) { esw_cleanup_decap_indir(esw, attr); - } else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) { + } else if (!mlx5_esw_attr_flags_skip(attr->flags)) { if (attr->dest_chain) esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0); else if (esw_is_indir_table(esw, attr)) @@ -678,7 +678,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, mlx5_del_flow_rules(rule); - if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) { + if (!mlx5_esw_attr_flags_skip(attr->flags)) { /* unref the term table */ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) { if (esw_attr->dests[i].termtbl) @@ -1009,7 +1009,7 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw) u16 vport_num; num_vfs = esw->esw_funcs.num_vfs; - flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL); + flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL); if (!flows) return -ENOMEM; @@ -1188,7 +1188,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, peer_miss_rules_setup(esw, peer_dev, spec, &dest); - flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL); + flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL); if (!flows) { err = -ENOMEM; goto alloc_flows_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index b45954905845..879d78e46e47 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -219,7 +219,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw, if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) || - attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH || + mlx5_esw_attr_flags_skip(attr->flags) || !mlx5_eswitch_offload_is_uplink_port(esw, spec)) return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 18e5aec14641..f542a36be62c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -497,8 +497,7 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); bulk_len = alloc_bitmask > 0 ? 
MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; - bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc), - GFP_KERNEL); + bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL); if (!bulk) goto err_alloc_bulk; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 269ebb53eda6..f7ebc1f9283f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -219,7 +219,7 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv) int mlx5i_create_underlay_qp(struct mlx5e_priv *priv) { - unsigned char *dev_addr = priv->netdev->dev_addr; + const unsigned char *dev_addr = priv->netdev->dev_addr; u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {}; struct mlx5i_priv *ipriv = priv->ppriv; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 79482824c64f..65313448a47c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1112,8 +1112,9 @@ static int mlx5_load(struct mlx5_core_dev *dev) err = mlx5_fw_tracer_init(dev->tracer); if (err) { - mlx5_core_err(dev, "Failed to init FW tracer\n"); - goto err_fw_tracer; + mlx5_core_err(dev, "Failed to init FW tracer %d\n", err); + mlx5_fw_tracer_destroy(dev->tracer); + dev->tracer = NULL; } mlx5_fw_reset_events_start(dev); @@ -1121,8 +1122,9 @@ static int mlx5_load(struct mlx5_core_dev *dev) err = mlx5_rsc_dump_init(dev); if (err) { - mlx5_core_err(dev, "Failed to init Resource dump\n"); - goto err_rsc_dump; + mlx5_core_err(dev, "Failed to init Resource dump %d\n", err); + mlx5_rsc_dump_destroy(dev); + dev->rsc_dump = NULL; } err = mlx5_fpga_device_start(dev); @@ -1192,11 +1194,9 @@ err_tls_start: mlx5_fpga_device_stop(dev); err_fpga_start: mlx5_rsc_dump_cleanup(dev); -err_rsc_dump: mlx5_hv_vhca_cleanup(dev->hv_vhca); mlx5_fw_reset_events_stop(dev); mlx5_fw_tracer_cleanup(dev->tracer); -err_fw_tracer: mlx5_eq_table_destroy(dev); err_eq_table: mlx5_irq_table_destroy(dev); @@ -1537,8 +1537,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); pci_save_state(pdev); - if (!mlx5_core_is_mp_slave(dev)) - devlink_reload_enable(devlink); + devlink_register(devlink); return 0; err_init_one: @@ -1558,7 +1557,7 @@ static void remove_one(struct pci_dev *pdev) struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct devlink *devlink = priv_to_devlink(dev); - devlink_reload_disable(devlink); + devlink_unregister(devlink); mlx5_crdump_disable(dev); mlx5_drain_health_wq(dev); mlx5_uninit_one(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index abd024173c42..8116815663a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -8,8 +8,6 @@ #define MLX5_COMP_EQS_PER_SF 8 -#define MLX5_IRQ_EQ_CTRL (0) - struct mlx5_irq; int mlx5_irq_table_init(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 763c83a02380..830444f927d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -194,15 +194,25 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx) 
snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx); } -static void irq_set_name(char *name, int vecidx) +static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx) { - if (vecidx == 0) { + if (!pool->xa_num_irqs.max) { + /* in case we only have a single irq for the device */ + snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx); + return; + } + + if (vecidx == pool->xa_num_irqs.max) { snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx); return; } - snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", - vecidx - MLX5_IRQ_VEC_COMP_BASE); + snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx); +} + +static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool) +{ + return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf")); } static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) @@ -216,8 +226,8 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) if (!irq) return ERR_PTR(-ENOMEM); irq->irqn = pci_irq_vector(dev->pdev, i); - if (!pool->name[0]) - irq_set_name(name, i); + if (!irq_pool_is_sf_pool(pool)) + irq_set_name(pool, name, i); else irq_sf_set_name(pool, name, i); ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh); @@ -386,6 +396,9 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx, if (IS_ERR(irq) || !affinity) goto unlock; cpumask_copy(irq->mask, affinity); + if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max && + cpumask_empty(irq->mask)) + cpumask_set_cpu(0, irq->mask); irq_set_affinity_hint(irq->irqn, irq->mask); unlock: mutex_unlock(&pool->lock); @@ -440,6 +453,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx, } pf_irq: pool = irq_table->pf_pool; + vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx; irq = irq_pool_request_vector(pool, vecidx, affinity); out: if (IS_ERR(irq)) @@ -577,6 +591,8 @@ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev) int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table) { + if (!table->pf_pool->xa_num_irqs.max) + return 1; return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min; } @@ -592,19 +608,15 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev) if (mlx5_core_is_sf(dev)) return 0; - pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + - MLX5_IRQ_VEC_COMP_BASE; + pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1; pf_vec = min_t(int, pf_vec, num_eqs); - if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE) - return -ENOMEM; total_vec = pf_vec; if (mlx5_sf_max_functions(dev)) total_vec += MLX5_IRQ_CTRL_SF_MAX + MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev); - total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1, - total_vec, PCI_IRQ_MSIX); + total_vec = pci_alloc_irq_vectors(dev->pdev, 1, total_vec, PCI_IRQ_MSIX); if (total_vec < 0) return total_vec; pf_vec = min(pf_vec, total_vec); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 052f48068dc1..7b4783ce213e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -46,7 +46,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err); goto init_one_err; } - devlink_reload_enable(devlink); + devlink_register(devlink); return 0; init_one_err: @@ -61,10 +61,9 @@ mdev_err: static void mlx5_sf_dev_remove(struct auxiliary_device *adev) { struct mlx5_sf_dev *sf_dev = container_of(adev, 
struct mlx5_sf_dev, adev); - struct devlink *devlink; + struct devlink *devlink = priv_to_devlink(sf_dev->mdev); - devlink = priv_to_devlink(sf_dev->mdev); - devlink_reload_disable(devlink); + devlink_unregister(devlink); mlx5_uninit_one(sf_dev->mdev); iounmap(sf_dev->mdev->iseg); mlx5_mdev_uninit(sf_dev->mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index a5b9f65db23c..50630112c8ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -39,6 +39,7 @@ static const char * const action_type_to_str[] = { [DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT", [DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN", [DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN", + [DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER", [DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR", [DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR", [DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN", @@ -513,9 +514,9 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn, /* If destination is vport we will get the FW flow table * that recalculates the CS and forwards to the vport. */ - ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn, - dest_action->vport->caps->num, - final_icm_addr); + ret = mlx5dr_domain_get_recalc_cs_ft_addr(dest_action->vport->dmn, + dest_action->vport->caps->num, + final_icm_addr); if (ret) { mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n"); return ret; @@ -632,7 +633,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, return -EOPNOTSUPP; case DR_ACTION_TYP_CTR: attr.ctr_id = action->ctr->ctr_id + - action->ctr->offeset; + action->ctr->offset; break; case DR_ACTION_TYP_TAG: attr.flow_tag = action->flow_tag->flow_tag; @@ -669,7 +670,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, attr.hit_gvmi = action->vport->caps->vhca_gvmi; dest_action = action; if (rx_rule) { - if (action->vport->caps->num == WIRE_PORT) { + if (action->vport->caps->num == MLX5_VPORT_UPLINK) { mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n"); return -EOPNOTSUPP; } @@ -1747,7 +1748,7 @@ dec_ref: struct mlx5dr_action * mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, - u32 vport, u8 vhca_id_valid, + u16 vport, u8 vhca_id_valid, u16 vhca_id) { struct mlx5dr_cmd_vport_cap *vport_cap; @@ -1767,9 +1768,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, return NULL; } - vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport); + vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, vport); if (!vport_cap) { - mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport); + mlx5dr_err(dmn, + "Failed to get vport 0x%x caps - vport is disabled or invalid\n", + vport); return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 56307283bf9b..1d8febed0d76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -195,6 +195,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port); + caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev); + return 0; } @@ -272,7 +274,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev, u32 table_id, u32 group_id, u32 modify_header_id, - u32 vport_id) + u16 
vport) { u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {}; void *in_flow_context; @@ -303,7 +305,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev, in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); MLX5_SET(dest_format_struct, in_dests, destination_type, MLX5_FLOW_DESTINATION_TYPE_VPORT); - MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id); + MLX5_SET(dest_format_struct, in_dests, destination_id, vport); err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); kvfree(in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 0fe159809ba1..49089cbe897c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -9,48 +9,45 @@ ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX)) -static int dr_domain_init_cache(struct mlx5dr_domain *dmn) +static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn) { /* Per vport cached FW FT for checksum recalculation, this - * recalculation is needed due to a HW bug. + * recalculation is needed due to a HW bug in STEv0. */ - dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports, - sizeof(dmn->cache.recalc_cs_ft[0]), - GFP_KERNEL); - if (!dmn->cache.recalc_cs_ft) - return -ENOMEM; - - return 0; + xa_init(&dmn->csum_fts_xa); } -static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn) +static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn) { - int i; - - for (i = 0; i < dmn->info.caps.num_vports; i++) { - if (!dmn->cache.recalc_cs_ft[i]) - continue; + struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; + unsigned long i; - mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]); + xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) { + if (recalc_cs_ft) + mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft); } - kfree(dmn->cache.recalc_cs_ft); + xa_destroy(&dmn->csum_fts_xa); } -int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, - u32 vport_num, - u64 *rx_icm_addr) +int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, + u16 vport_num, + u64 *rx_icm_addr) { struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; + int ret; - recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num]; + recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num); if (!recalc_cs_ft) { - /* Table not in cache, need to allocate a new one */ + /* Table hasn't been created yet */ recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num); if (!recalc_cs_ft) return -EINVAL; - dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft; + ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num, + recalc_cs_ft, GFP_KERNEL)); + if (ret) + return ret; } *rx_icm_addr = recalc_cs_ft->rx_icm_addr; @@ -124,18 +121,39 @@ static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn) mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn); } +static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn, + struct mlx5dr_cmd_vport_cap *uplink_vport) +{ + struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps; + + uplink_vport->num = MLX5_VPORT_UPLINK; + uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx; + uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx; + uplink_vport->vport_gvmi = 0; + uplink_vport->vhca_gvmi = dmn->info.caps.gvmi; +} + static int dr_domain_query_vport(struct mlx5dr_domain *dmn, - bool other_vport, - u16 vport_number) + u16 vport_number, + struct 
mlx5dr_cmd_vport_cap *vport_caps) { - struct mlx5dr_cmd_vport_cap *vport_caps; + u16 cmd_vport = vport_number; + bool other_vport = true; int ret; - vport_caps = &dmn->info.caps.vports_caps[vport_number]; + if (vport_number == MLX5_VPORT_UPLINK) { + dr_domain_fill_uplink_caps(dmn, vport_caps); + return 0; + } + + if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) { + other_vport = false; + cmd_vport = 0; + } ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev, other_vport, - vport_number, + cmd_vport, &vport_caps->icm_address_rx, &vport_caps->icm_address_tx); if (ret) @@ -143,7 +161,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn, ret = mlx5dr_cmd_query_gvmi(dmn->mdev, other_vport, - vport_number, + cmd_vport, &vport_caps->vport_gvmi); if (ret) return ret; @@ -154,27 +172,82 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn, return 0; } -static int dr_domain_query_vports(struct mlx5dr_domain *dmn) +static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn) { - struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps; - struct mlx5dr_cmd_vport_cap *wire_vport; - int vport; + return dr_domain_query_vport(dmn, + dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0, + &dmn->info.caps.vports.esw_manager_caps); +} + +static struct mlx5dr_cmd_vport_cap * +dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) +{ + struct mlx5dr_cmd_caps *caps = &dmn->info.caps; + struct mlx5dr_cmd_vport_cap *vport_caps; int ret; - /* Query vports (except wire vport) */ - for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) { - ret = dr_domain_query_vport(dmn, !!vport, vport); - if (ret) - return ret; + vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL); + if (!vport_caps) + return NULL; + + ret = dr_domain_query_vport(dmn, vport, vport_caps); + if (ret) { + kvfree(vport_caps); + return NULL; } - /* Last vport is the wire port */ - wire_vport = &dmn->info.caps.vports_caps[vport]; - wire_vport->num = WIRE_PORT; - wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx; - wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx; - wire_vport->vport_gvmi = 0; - wire_vport->vhca_gvmi = dmn->info.caps.gvmi; + ret = xa_insert(&caps->vports.vports_caps_xa, vport, + vport_caps, GFP_KERNEL); + if (ret) { + mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret); + kvfree(vport_caps); + return ERR_PTR(ret); + } + + return vport_caps; +} + +struct mlx5dr_cmd_vport_cap * +mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport) +{ + struct mlx5dr_cmd_caps *caps = &dmn->info.caps; + struct mlx5dr_cmd_vport_cap *vport_caps; + + if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) || + (!caps->is_ecpf && vport == 0)) + return &caps->vports.esw_manager_caps; + +vport_load: + vport_caps = xa_load(&caps->vports.vports_caps_xa, vport); + if (vport_caps) + return vport_caps; + + vport_caps = dr_domain_add_vport_cap(dmn, vport); + if (PTR_ERR(vport_caps) == -EBUSY) + /* caps were already stored by another thread */ + goto vport_load; + + return vport_caps; +} + +static void dr_domain_clear_vports(struct mlx5dr_domain *dmn) +{ + struct mlx5dr_cmd_vport_cap *vport_caps; + unsigned long i; + + xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) { + vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i); + kvfree(vport_caps); + } +} + +static int dr_domain_query_uplink(struct mlx5dr_domain *dmn) +{ + struct mlx5dr_cmd_vport_cap *vport_caps; + + vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK); + if (!vport_caps) 
+ return -EINVAL; return 0; } @@ -196,25 +269,29 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx; dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx; - dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports, - sizeof(dmn->info.caps.vports_caps[0]), - GFP_KERNEL); - if (!dmn->info.caps.vports_caps) - return -ENOMEM; + xa_init(&dmn->info.caps.vports.vports_caps_xa); - ret = dr_domain_query_vports(dmn); + /* Query eswitch manager and uplink vports only. Rest of the + * vports (vport 0, VFs and SFs) will be queried dynamically. + */ + + ret = dr_domain_query_esw_mngr(dmn); if (ret) { - mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret); - goto free_vports_caps; + mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret); + goto free_vports_caps_xa; } - dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1; + ret = dr_domain_query_uplink(dmn); + if (ret) { + mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret); + goto free_vports_caps_xa; + } return 0; -free_vports_caps: - kfree(dmn->info.caps.vports_caps); - dmn->info.caps.vports_caps = NULL; +free_vports_caps_xa: + xa_destroy(&dmn->info.caps.vports.vports_caps_xa); + return ret; } @@ -229,8 +306,6 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, return -EOPNOTSUPP; } - dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev); - ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps); if (ret) return ret; @@ -267,11 +342,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; - vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0); - if (!vport_cap) { - mlx5dr_err(dmn, "Failed to get esw manager vport\n"); - return -ENOENT; - } + vport_cap = &dmn->info.caps.vports.esw_manager_caps; dmn->info.supp_sw_steering = true; dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx; @@ -290,7 +361,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn) { - kfree(dmn->info.caps.vports_caps); + dr_domain_clear_vports(dmn); + xa_destroy(&dmn->info.caps.vports.vports_caps_xa); } struct mlx5dr_domain * @@ -333,16 +405,10 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) goto uninit_caps; } - ret = dr_domain_init_cache(dmn); - if (ret) { - mlx5dr_err(dmn, "Failed initialize domain cache\n"); - goto uninit_resourses; - } + dr_domain_init_csum_recalc_fts(dmn); return dmn; -uninit_resourses: - dr_domain_uninit_resources(dmn); uninit_caps: dr_domain_caps_uninit(dmn); free_domain: @@ -381,7 +447,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn) /* make sure resources are not used by the hardware */ mlx5dr_cmd_sync_steering(dmn->mdev); - dr_domain_uninit_cache(dmn); + dr_domain_uninit_csum_recalc_fts(dmn); dr_domain_uninit_resources(dmn); dr_domain_caps_uninit(dmn); mutex_destroy(&dmn->info.tx.mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c index 0d6f86eb248b..68a4c32d5f34 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c @@ -5,7 +5,7 @@ #include "dr_types.h" struct mlx5dr_fw_recalc_cs_ft * -mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num) 
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num) { struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index aca80efc28fa..323ea138ad99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -1042,10 +1042,10 @@ static bool dr_rule_skip(enum mlx5dr_domain_type domain, return false; if (mask->misc.source_port) { - if (rx && value->misc.source_port != WIRE_PORT) + if (rx && value->misc.source_port != MLX5_VPORT_UPLINK) return true; - if (!rx && value->misc.source_port == WIRE_PORT) + if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK) return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index 9c704bce3c12..b0649c2877dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -1645,7 +1645,7 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, struct mlx5dr_match_misc *misc = &value->misc; struct mlx5dr_cmd_vport_cap *vport_cap; struct mlx5dr_domain *dmn = sb->dmn; - struct mlx5dr_cmd_caps *caps; + struct mlx5dr_domain *vport_dmn; u8 *bit_mask = sb->bit_mask; bool source_gvmi_set; @@ -1654,23 +1654,24 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, if (sb->vhca_id_valid) { /* Find port GVMI based on the eswitch_owner_vhca_id */ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) - caps = &dmn->info.caps; + vport_dmn = dmn; else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == dmn->peer_dmn->info.caps.gvmi)) - caps = &dmn->peer_dmn->info.caps; + vport_dmn = dmn->peer_dmn; else return -EINVAL; misc->source_eswitch_owner_vhca_id = 0; } else { - caps = &dmn->info.caps; + vport_dmn = dmn; } source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi); if (source_gvmi_set) { - vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port); + vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, + misc->source_port); if (!vport_cap) { - mlx5dr_err(dmn, "Vport 0x%x is invalid\n", + mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n", misc->source_port); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index b2481c99da79..cb9cf67b0a02 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -586,9 +586,11 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) { u8 *d_action; - dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); - action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); - action_sz = DR_STE_ACTION_TRIPLE_SZ; + if (action_sz < DR_STE_ACTION_TRIPLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } d_action = action + DR_STE_ACTION_SINGLE_SZ; dr_ste_v1_set_encap_l3(last_ste, @@ -1776,7 +1778,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, struct mlx5dr_match_misc *misc = &value->misc; struct mlx5dr_cmd_vport_cap 
*vport_cap; struct mlx5dr_domain *dmn = sb->dmn; - struct mlx5dr_cmd_caps *caps; + struct mlx5dr_domain *vport_dmn; u8 *bit_mask = sb->bit_mask; DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn); @@ -1784,22 +1786,22 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, if (sb->vhca_id_valid) { /* Find port GVMI based on the eswitch_owner_vhca_id */ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) - caps = &dmn->info.caps; + vport_dmn = dmn; else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == dmn->peer_dmn->info.caps.gvmi)) - caps = &dmn->peer_dmn->info.caps; + vport_dmn = dmn->peer_dmn; else return -EINVAL; - misc->source_eswitch_owner_vhca_id = 0; + misc->source_eswitch_owner_vhca_id = 0; } else { - caps = &dmn->info.caps; + vport_dmn = dmn; } if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi)) return 0; - vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port); + vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port); if (!vport_cap) { mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n", misc->source_port); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index b20e8aabb861..73fed94af09a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -4,7 +4,7 @@ #ifndef _DR_TYPES_ #define _DR_TYPES_ -#include <linux/mlx5/driver.h> +#include <linux/mlx5/vport.h> #include <linux/refcount.h> #include "fs_core.h" #include "wq.h" @@ -14,7 +14,6 @@ #define DR_RULE_MAX_STES 18 #define DR_ACTION_MAX_STES 5 -#define WIRE_PORT 0xFFFF #define DR_STE_SVLAN 0x1 #define DR_STE_CVLAN 0x2 #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4) @@ -752,9 +751,9 @@ struct mlx5dr_esw_caps { struct mlx5dr_cmd_vport_cap { u16 vport_gvmi; u16 vhca_gvmi; + u16 num; u64 icm_address_rx; u64 icm_address_tx; - u32 num; }; struct mlx5dr_roce_cap { @@ -763,6 +762,11 @@ struct mlx5dr_roce_cap { u8 fl_rc_qp_when_roce_enabled:1; }; +struct mlx5dr_vports { + struct mlx5dr_cmd_vport_cap esw_manager_caps; + struct xarray vports_caps_xa; +}; + struct mlx5dr_cmd_caps { u16 gvmi; u64 nic_rx_drop_address; @@ -786,7 +790,6 @@ struct mlx5dr_cmd_caps { u8 flex_parser_id_gtpu_first_ext_dw_0; u8 max_ft_level; u16 roce_min_src_udp; - u8 num_esw_ports; u8 sw_format_ver; bool eswitch_manager; bool rx_sw_owner; @@ -795,11 +798,11 @@ struct mlx5dr_cmd_caps { u8 rx_sw_owner_v2:1; u8 tx_sw_owner_v2:1; u8 fdb_sw_owner_v2:1; - u32 num_vports; struct mlx5dr_esw_caps esw_caps; - struct mlx5dr_cmd_vport_cap *vports_caps; + struct mlx5dr_vports vports; bool prio_tag_required; struct mlx5dr_roce_cap roce_caps; + u8 is_ecpf:1; u8 isolate_vl_tc:1; }; @@ -826,10 +829,6 @@ struct mlx5dr_domain_info { struct mlx5dr_cmd_caps caps; }; -struct mlx5dr_domain_cache { - struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft; -}; - struct mlx5dr_domain { struct mlx5dr_domain *peer_dmn; struct mlx5_core_dev *mdev; @@ -841,7 +840,7 @@ struct mlx5dr_domain { struct mlx5dr_icm_pool *action_icm_pool; struct mlx5dr_send_ring *send_ring; struct mlx5dr_domain_info info; - struct mlx5dr_domain_cache cache; + struct xarray csum_fts_xa; struct mlx5dr_ste_ctx *ste_ctx; }; @@ -942,7 +941,7 @@ struct mlx5dr_action_dest_tbl { struct mlx5dr_action_ctr { u32 ctr_id; - u32 offeset; + u32 offset; }; struct mlx5dr_action_vport { @@ -1102,18 +1101,8 @@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl) return true; } -static 
inline struct mlx5dr_cmd_vport_cap * -mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport) -{ - if (!caps->vports_caps || - (vport >= caps->num_vports && vport != WIRE_PORT)) - return NULL; - - if (vport == WIRE_PORT) - vport = caps->num_vports; - - return &caps->vports_caps[vport]; -} +struct mlx5dr_cmd_vport_cap * +mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport); struct mlx5dr_cmd_query_flow_table_details { u8 status; @@ -1154,7 +1143,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev, u32 table_id, u32 group_id, u32 modify_header_id, - u32 vport_id); + u16 vport_id); int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev, u32 table_type, u32 table_id); @@ -1372,12 +1361,12 @@ struct mlx5dr_fw_recalc_cs_ft { }; struct mlx5dr_fw_recalc_cs_ft * -mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num); +mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num); void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn, struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft); -int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, - u32 vport_num, - u64 *rx_icm_addr); +int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, + u16 vport_num, + u64 *rx_icm_addr); int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_cmd_flow_destination_hw_info *dest, int num_dest, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 7e58f4e594b7..230e920e3845 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -222,7 +222,7 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst) dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID; } -#define MLX5_FLOW_CONTEXT_ACTION_MAX 20 +#define MLX5_FLOW_CONTEXT_ACTION_MAX 32 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_group *group, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index c5a8b1601999..c7c93131b762 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -89,7 +89,7 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain, struct mlx5dr_action * mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, - u32 vport, u8 vhca_id_valid, + u16 vport, u8 vhca_id_valid, u16 vhca_id); struct mlx5dr_action * diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 6704f5c1aa32..b990782c1eb1 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -75,7 +75,7 @@ static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv) u64_to_ether_addr(local_mac, mac); if (is_valid_ether_addr(mac)) { - ether_addr_copy(priv->netdev->dev_addr, mac); + eth_hw_addr_set(priv->netdev, mac); } else { /* Provide a random MAC if for some reason the device has * not been configured with a valid MAC address already. 
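A note on the mlx5dr vport refactor above: the fixed, eswitch-sized vports_caps array (and the parallel recalc-CS flow-table cache) is replaced by xarrays that are populated lazily the first time a vport is referenced, with an -EBUSY retry when two threads race to create the same entry, as in mlx5dr_domain_get_vport_cap(). What follows is only a minimal sketch of that lookup-or-create idiom; my_entry and my_cache_get() are placeholder names, not driver symbols, and the snippet is illustrative kernel-style C rather than a drop-in implementation.

/* Sketch: xa_load()/xa_insert() lazy population with -EBUSY retry,
 * mirroring mlx5dr_domain_get_vport_cap(). Placeholder names only.
 */
#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/types.h>

struct my_entry {
	u16 id;
	/* cached per-vport data would live here */
};

static struct my_entry *my_cache_get(struct xarray *xa, u16 id)
{
	struct my_entry *e;
	int ret;

again:
	e = xa_load(xa, id);		/* fast path: already cached */
	if (e)
		return e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return NULL;
	e->id = id;

	ret = xa_insert(xa, id, e, GFP_KERNEL);
	if (ret) {
		kfree(e);
		if (ret == -EBUSY)	/* another thread stored it first */
			goto again;
		return ERR_PTR(ret);
	}
	return e;
}

Teardown then walks the xarray with xa_for_each(), frees each entry and calls xa_destroy(), exactly as dr_domain_clear_vports() and dr_domain_uninit_csum_recalc_fts() do in the hunks above.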
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h index 7654841a05c2..e6475ea77cd1 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h @@ -19,7 +19,7 @@ struct mlxfw_dev { static inline struct device *mlxfw_dev_dev(struct mlxfw_dev *mlxfw_dev) { - return mlxfw_dev->devlink->dev; + return devlink_to_dev(mlxfw_dev->devlink); } #define MLXFW_PRFX "mlxfw: " diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f080fab3de2b..3fd3812b8f31 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -90,7 +90,6 @@ struct mlxsw_core { struct devlink_health_reporter *fw_fatal; } health; struct mlxsw_env *env; - bool is_initialized; /* Denotes if core was already initialized. */ unsigned long driver_priv[]; /* driver_priv has to be always the last item */ }; @@ -1975,12 +1974,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_emad_init; if (!reload) { - err = devlink_register(devlink); - if (err) - goto err_devlink_register; - } - - if (!reload) { err = mlxsw_core_params_register(mlxsw_core); if (err) goto err_register_params; @@ -1995,12 +1988,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_health_init; - if (mlxsw_driver->init) { - err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack); - if (err) - goto err_driver_init; - } - err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); if (err) goto err_hwmon_init; @@ -2014,31 +2001,31 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_env_init; - mlxsw_core->is_initialized = true; - devlink_params_publish(devlink); - - if (!reload) - devlink_reload_enable(devlink); + if (mlxsw_driver->init) { + err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack); + if (err) + goto err_driver_init; + } + if (!reload) { + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); + } return 0; +err_driver_init: + mlxsw_env_fini(mlxsw_core->env); err_env_init: mlxsw_thermal_fini(mlxsw_core->thermal); err_thermal_init: mlxsw_hwmon_fini(mlxsw_core->hwmon); err_hwmon_init: - if (mlxsw_core->driver->fini) - mlxsw_core->driver->fini(mlxsw_core); -err_driver_init: mlxsw_core_health_fini(mlxsw_core); err_health_init: err_fw_rev_validate: if (!reload) mlxsw_core_params_unregister(mlxsw_core); err_register_params: - if (!reload) - devlink_unregister(devlink); -err_devlink_register: mlxsw_emad_fini(mlxsw_core); err_emad_init: kfree(mlxsw_core->lag.mapping); @@ -2088,7 +2075,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, struct devlink *devlink = priv_to_devlink(mlxsw_core); if (!reload) - devlink_reload_disable(devlink); + devlink_unregister(devlink); + if (devlink_is_reload_failed(devlink)) { if (!reload) /* Only the parts that were not de-initialized in the @@ -2099,18 +2087,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, return; } - devlink_params_unpublish(devlink); - mlxsw_core->is_initialized = false; + if (mlxsw_core->driver->fini) + mlxsw_core->driver->fini(mlxsw_core); mlxsw_env_fini(mlxsw_core->env); mlxsw_thermal_fini(mlxsw_core->thermal); mlxsw_hwmon_fini(mlxsw_core->hwmon); - if (mlxsw_core->driver->fini) - mlxsw_core->driver->fini(mlxsw_core); mlxsw_core_health_fini(mlxsw_core); if (!reload) 
mlxsw_core_params_unregister(mlxsw_core); - if (!reload) - devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); kfree(mlxsw_core->lag.mapping); mlxsw_ports_fini(mlxsw_core, reload); @@ -2124,7 +2108,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, reload_fail_deinit: mlxsw_core_params_unregister(mlxsw_core); - devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); devlink_free(devlink); } @@ -2939,49 +2922,6 @@ struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core) return mlxsw_core->env; } -bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core) -{ - return mlxsw_core->is_initialized; -} - -int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module) -{ - enum mlxsw_reg_pmtm_module_type module_type; - char pmtm_pl[MLXSW_REG_PMTM_LEN]; - int err; - - mlxsw_reg_pmtm_pack(pmtm_pl, module); - err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl); - if (err) - return err; - mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type); - - /* Here we need to get the module width according to the module type. */ - - switch (module_type) { - case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: - case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: - case MLXSW_REG_PMTM_MODULE_TYPE_OSFP: - return 8; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: - case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: - case MLXSW_REG_PMTM_MODULE_TYPE_QSFP: - return 4; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: - case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: - case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: - case MLXSW_REG_PMTM_MODULE_TYPE_DSFP: - return 2; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: - case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: - case MLXSW_REG_PMTM_MODULE_TYPE_SFP: - return 1; - default: - return -EINVAL; - } -} -EXPORT_SYMBOL(mlxsw_core_module_max_width); - static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, const char *buf, size_t size) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 80712dc803d0..12023a550007 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -249,8 +249,6 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, u8 local_port); bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port); struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core); -bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core); -int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); bool mlxsw_core_schedule_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 3713c45cfa1e..6dd4ae2f45f4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -5,6 +5,7 @@ #include <linux/err.h> #include <linux/ethtool.h> #include <linux/sfp.h> +#include <linux/mutex.h> #include "core.h" #include "core_env.h" @@ -14,12 +15,15 @@ struct mlxsw_env_module_info { u64 module_overheat_counter; bool is_overheat; + int num_ports_mapped; + int num_ports_up; + enum ethtool_module_power_mode_policy power_mode_policy; }; struct mlxsw_env { struct mlxsw_core *core; u8 module_count; - spinlock_t module_info_lock; /* Protects 'module_info'. */ + struct mutex module_info_lock; /* Protects 'module_info'. 
*/ struct mlxsw_env_module_info module_info[]; }; @@ -389,6 +393,205 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, } EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page); +static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module) +{ + char pmaos_pl[MLXSW_REG_PMAOS_LEN]; + + mlxsw_reg_pmaos_pack(pmaos_pl, module); + mlxsw_reg_pmaos_rst_set(pmaos_pl, true); + + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); +} + +int mlxsw_env_reset_module(struct net_device *netdev, + struct mlxsw_core *mlxsw_core, u8 module, u32 *flags) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + u32 req = *flags; + int err; + + if (!(req & ETH_RESET_PHY) && + !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) + return 0; + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return -EINVAL; + + mutex_lock(&mlxsw_env->module_info_lock); + + if (mlxsw_env->module_info[module].num_ports_up) { + netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n"); + err = -EINVAL; + goto out; + } + + if (mlxsw_env->module_info[module].num_ports_mapped > 1 && + !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) { + netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n"); + err = -EINVAL; + goto out; + } + + err = mlxsw_env_module_reset(mlxsw_core, module); + if (err) { + netdev_err(netdev, "Failed to reset module\n"); + goto out; + } + + *flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)); + +out: + mutex_unlock(&mlxsw_env->module_info_lock); + return err; +} +EXPORT_SYMBOL(mlxsw_env_reset_module); + +int +mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, + struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + char mcion_pl[MLXSW_REG_MCION_LEN]; + u32 status_bits; + int err; + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return -EINVAL; + + mutex_lock(&mlxsw_env->module_info_lock); + + params->policy = mlxsw_env->module_info[module].power_mode_policy; + + mlxsw_reg_mcion_pack(mcion_pl, module); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode"); + goto out; + } + + status_bits = mlxsw_reg_mcion_module_status_bits_get(mcion_pl); + if (!(status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK)) + goto out; + + if (status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK) + params->mode = ETHTOOL_MODULE_POWER_MODE_LOW; + else + params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH; + +out: + mutex_unlock(&mlxsw_env->module_info_lock); + return err; +} +EXPORT_SYMBOL(mlxsw_env_get_module_power_mode); + +static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core, + u8 module, bool enable) +{ + enum mlxsw_reg_pmaos_admin_status admin_status; + char pmaos_pl[MLXSW_REG_PMAOS_LEN]; + + mlxsw_reg_pmaos_pack(pmaos_pl, module); + admin_status = enable ? 
MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED : + MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED; + mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status); + mlxsw_reg_pmaos_ase_set(pmaos_pl, true); + + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); +} + +static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core, + u8 module, bool low_power) +{ + u16 eeprom_override_mask, eeprom_override; + char pmmp_pl[MLXSW_REG_PMMP_LEN]; + + mlxsw_reg_pmmp_pack(pmmp_pl, module); + mlxsw_reg_pmmp_sticky_set(pmmp_pl, true); + /* Mask all the bits except low power mode. */ + eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK; + mlxsw_reg_pmmp_eeprom_override_mask_set(pmmp_pl, eeprom_override_mask); + eeprom_override = low_power ? MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK : + 0; + mlxsw_reg_pmmp_eeprom_override_set(pmmp_pl, eeprom_override); + + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmmp), pmmp_pl); +} + +static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, + u8 module, bool low_power, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlxsw_env_module_enable_set(mlxsw_core, module, false); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to disable module"); + return err; + } + + err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode"); + goto err_module_low_power_set; + } + + err = mlxsw_env_module_enable_set(mlxsw_core, module, true); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to enable module"); + goto err_module_enable_set; + } + + return 0; + +err_module_enable_set: + mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power); +err_module_low_power_set: + mlxsw_env_module_enable_set(mlxsw_core, module, true); + return err; +} + +int +mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, + enum ethtool_module_power_mode_policy policy, + struct netlink_ext_ack *extack) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + bool low_power; + int err = 0; + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return -EINVAL; + + if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH && + policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy"); + return -EOPNOTSUPP; + } + + mutex_lock(&mlxsw_env->module_info_lock); + + if (mlxsw_env->module_info[module].power_mode_policy == policy) + goto out; + + /* If any ports are up, we are already in high power mode. 
*/ + if (mlxsw_env->module_info[module].num_ports_up) + goto out_set_policy; + + low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO; + err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power, + extack); + if (err) + goto out; + +out_set_policy: + mlxsw_env->module_info[module].power_mode_policy = policy; +out: + mutex_unlock(&mlxsw_env->module_info_lock); + return err; +} +EXPORT_SYMBOL(mlxsw_env_set_module_power_mode); + static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core, u8 module, bool *p_has_temp_sensor) @@ -482,22 +685,32 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core, return 0; } -static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg, - char *mtwe_pl, void *priv) +struct mlxsw_env_module_temp_warn_event { + struct mlxsw_env *mlxsw_env; + char mtwe_pl[MLXSW_REG_MTWE_LEN]; + struct work_struct work; +}; + +static void mlxsw_env_mtwe_event_work(struct work_struct *work) { - struct mlxsw_env *mlxsw_env = priv; + struct mlxsw_env_module_temp_warn_event *event; + struct mlxsw_env *mlxsw_env; int i, sensor_warning; bool is_overheat; + event = container_of(work, struct mlxsw_env_module_temp_warn_event, + work); + mlxsw_env = event->mlxsw_env; + for (i = 0; i < mlxsw_env->module_count; i++) { /* 64-127 of sensor_index are mapped to the port modules * sequentially (module 0 is mapped to sensor_index 64, * module 1 to sensor_index 65 and so on) */ sensor_warning = - mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl, + mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl, i + MLXSW_REG_MTMP_MODULE_INDEX_MIN); - spin_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->module_info_lock); is_overheat = mlxsw_env->module_info[i].is_overheat; @@ -507,13 +720,13 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg, * warning OR current state in "no warning" and MTWE * does not report warning. */ - spin_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); continue; } else if (is_overheat && !sensor_warning) { /* MTWE reports "no warning", turn is_overheat off. 
*/ mlxsw_env->module_info[i].is_overheat = false; - spin_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); } else { /* Current state is "no warning" and MTWE reports * "warning", increase the counter and turn is_overheat @@ -521,13 +734,32 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg, */ mlxsw_env->module_info[i].is_overheat = true; mlxsw_env->module_info[i].module_overheat_counter++; - spin_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); } } + + kfree(event); +} + +static void +mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl, + void *priv) +{ + struct mlxsw_env_module_temp_warn_event *event; + struct mlxsw_env *mlxsw_env = priv; + + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return; + + event->mlxsw_env = mlxsw_env; + memcpy(event->mtwe_pl, mtwe_pl, MLXSW_REG_MTWE_LEN); + INIT_WORK(&event->work, mlxsw_env_mtwe_event_work); + mlxsw_core_schedule_work(&event->work); } static const struct mlxsw_listener mlxsw_env_temp_warn_listener = - MLXSW_EVENTL(mlxsw_env_mtwe_event_func, MTWE, MTWE); + MLXSW_EVENTL(mlxsw_env_mtwe_listener_func, MTWE, MTWE); static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core) { @@ -568,9 +800,9 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work) work); mlxsw_env = event->mlxsw_env; - spin_lock_bh(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->module_info_lock); mlxsw_env->module_info[event->module].is_overheat = false; - spin_unlock_bh(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module, &has_temp_sensor); @@ -652,8 +884,10 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core, for (i = 0; i < module_count; i++) { char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, i, - MLXSW_REG_PMAOS_E_GENERATE_EVENT); + mlxsw_reg_pmaos_pack(pmaos_pl, i); + mlxsw_reg_pmaos_e_set(pmaos_pl, + MLXSW_REG_PMAOS_E_GENERATE_EVENT); + mlxsw_reg_pmaos_ee_set(pmaos_pl, true); err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); if (err) return err; @@ -667,29 +901,110 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); - /* Prevent switch driver from accessing uninitialized data. 
*/ - if (!mlxsw_core_is_initialized(mlxsw_core)) { - *p_counter = 0; - return 0; - } - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) return -EINVAL; - spin_lock_bh(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->module_info_lock); *p_counter = mlxsw_env->module_info[module].module_overheat_counter; - spin_unlock_bh(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); return 0; } EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get); +void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return; + + mutex_lock(&mlxsw_env->module_info_lock); + mlxsw_env->module_info[module].num_ports_mapped++; + mutex_unlock(&mlxsw_env->module_info_lock); +} +EXPORT_SYMBOL(mlxsw_env_module_port_map); + +void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return; + + mutex_lock(&mlxsw_env->module_info_lock); + mlxsw_env->module_info[module].num_ports_mapped--; + mutex_unlock(&mlxsw_env->module_info_lock); +} +EXPORT_SYMBOL(mlxsw_env_module_port_unmap); + +int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + int err = 0; + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return -EINVAL; + + mutex_lock(&mlxsw_env->module_info_lock); + + if (mlxsw_env->module_info[module].power_mode_policy != + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) + goto out_inc; + + if (mlxsw_env->module_info[module].num_ports_up != 0) + goto out_inc; + + /* Transition to high power mode following first port using the module + * being put administratively up. + */ + err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false, + NULL); + if (err) + goto out_unlock; + +out_inc: + mlxsw_env->module_info[module].num_ports_up++; +out_unlock: + mutex_unlock(&mlxsw_env->module_info_lock); + return err; +} +EXPORT_SYMBOL(mlxsw_env_module_port_up); + +void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return; + + mutex_lock(&mlxsw_env->module_info_lock); + + mlxsw_env->module_info[module].num_ports_up--; + + if (mlxsw_env->module_info[module].power_mode_policy != + ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) + goto out_unlock; + + if (mlxsw_env->module_info[module].num_ports_up != 0) + goto out_unlock; + + /* Transition to low power mode following last port using the module + * being put administratively down. + */ + __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL); + +out_unlock: + mutex_unlock(&mlxsw_env->module_info_lock); +} +EXPORT_SYMBOL(mlxsw_env_module_port_down); + int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) { char mgpir_pl[MLXSW_REG_MGPIR_LEN]; struct mlxsw_env *env; u8 module_count; - int err; + int i, err; mlxsw_reg_mgpir_pack(mgpir_pl); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl); @@ -702,7 +1017,14 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) if (!env) return -ENOMEM; - spin_lock_init(&env->module_info_lock); + /* Firmware defaults to high power mode policy where modules are + * transitioned to high power mode following plug-in. 
+ */ + for (i = 0; i < module_count; i++) + env->module_info[i].power_mode_policy = + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH; + + mutex_init(&env->module_info_lock); env->core = mlxsw_core; env->module_count = module_count; *p_env = env; @@ -732,6 +1054,7 @@ err_oper_state_event_enable: err_module_plug_event_register: mlxsw_env_temp_warn_event_unregister(env); err_temp_warn_event_register: + mutex_destroy(&env->module_info_lock); kfree(env); return err; } @@ -742,5 +1065,6 @@ void mlxsw_env_fini(struct mlxsw_env *env) /* Make sure there is no more event work scheduled. */ mlxsw_core_flush_owq(); mlxsw_env_temp_warn_event_unregister(env); + mutex_destroy(&env->module_info_lock); kfree(env); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h index 0bf5bd0f8a7e..da121b1a84b4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -24,9 +24,32 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack); +int mlxsw_env_reset_module(struct net_device *netdev, + struct mlxsw_core *mlxsw_core, u8 module, + u32 *flags); + +int +mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, + struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack); + +int +mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, + enum ethtool_module_power_mode_policy policy, + struct netlink_ext_ack *extack); + int mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, u64 *p_counter); + +void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module); + +void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module); + +int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module); + +void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module); + int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env); void mlxsw_env_fini(struct mlxsw_env *env); diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index e92cadc98128..ab70a873a01a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -270,11 +270,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +static inline u8 __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\ +static inline void __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val) \ { \ __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ } @@ -290,13 +292,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u8 \ +static inline u8 __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ { \ return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \ index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ 
u8 val) \ { \ @@ -311,11 +313,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +static inline u16 __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\ +static inline void __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val) \ { \ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ } @@ -331,13 +335,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u16 \ +static inline u16 __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ { \ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \ index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ u16 val) \ { \ @@ -352,11 +356,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +static inline u32 __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\ +static inline void __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \ { \ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ } @@ -372,13 +378,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u32 \ +static inline u32 __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ { \ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \ index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ u32 val) \ { \ @@ -393,11 +399,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +static inline u64 __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\ +static inline void __maybe_unused \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val) \ { \ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ } @@ -413,13 +421,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u64 \ +static inline u64 __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ { \ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \ 
index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ u64 val) \ { \ @@ -433,19 +441,19 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bytes = _sizebytes,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \ { \ __mlxsw_item_memcpy_from(buf, dst, \ &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \ { \ __mlxsw_item_memcpy_to(buf, src, \ &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ -static inline char * \ +static inline char * __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \ { \ return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ @@ -460,7 +468,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bytes = _sizebytes,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \ unsigned short index, \ char *dst) \ @@ -468,7 +476,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \ __mlxsw_item_memcpy_from(buf, dst, \ &__ITEM_NAME(_type, _cname, _iname), index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \ unsigned short index, \ const char *src) \ @@ -476,7 +484,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \ __mlxsw_item_memcpy_to(buf, src, \ &__ITEM_NAME(_type, _cname, _iname), index); \ } \ -static inline char * \ +static inline char * __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \ { \ return __mlxsw_item_data(buf, \ @@ -491,14 +499,14 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bytes = _sizebytes,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u8 \ +static inline u8 __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \ { \ return __mlxsw_item_bit_array_get(buf, \ &__ITEM_NAME(_type, _cname, _iname), \ index); \ } \ -static inline void \ +static inline void __maybe_unused \ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \ { \ return __mlxsw_item_bit_array_set(buf, \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index d9d56c44e994..e0892f259adf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -54,8 +54,20 @@ static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m) return 0; } -static int mlxsw_m_port_dummy_open_stop(struct net_device *dev) +static int mlxsw_m_port_open(struct net_device *dev) { + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); + struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; + + return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module); +} + +static int mlxsw_m_port_stop(struct net_device *dev) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); + struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; + + mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module); return 0; } @@ -70,8 +82,8 @@ mlxsw_m_port_get_devlink_port(struct net_device *dev) } static const struct 
net_device_ops mlxsw_m_port_netdev_ops = { - .ndo_open = mlxsw_m_port_dummy_open_stop, - .ndo_stop = mlxsw_m_port_dummy_open_stop, + .ndo_open = mlxsw_m_port_open, + .ndo_stop = mlxsw_m_port_stop, .ndo_get_devlink_port = mlxsw_m_port_get_devlink_port, }; @@ -124,11 +136,47 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev, page, extack); } +static int mlxsw_m_reset(struct net_device *netdev, u32 *flags) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module, + flags); +} + +static int +mlxsw_m_get_module_power_mode(struct net_device *netdev, + struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module, + params, extack); +} + +static int +mlxsw_m_set_module_power_mode(struct net_device *netdev, + const struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module, + params->policy, extack); +} + static const struct ethtool_ops mlxsw_m_port_ethtool_ops = { .get_drvinfo = mlxsw_m_module_get_drvinfo, .get_module_info = mlxsw_m_get_module_info, .get_module_eeprom = mlxsw_m_get_module_eeprom, .get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page, + .reset = mlxsw_m_reset, + .get_module_power_mode = mlxsw_m_get_module_power_mode, + .set_module_power_mode = mlxsw_m_set_module_power_mode, }; static int @@ -266,6 +314,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port, if (WARN_ON_ONCE(module >= max_ports)) return -EINVAL; + mlxsw_env_module_port_map(mlxsw_m->core, module); mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports; return 0; @@ -274,6 +323,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port, static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module) { mlxsw_m->module_to_port[module] = -1; + mlxsw_env_module_port_unmap(mlxsw_m->core, module); } static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6fbda6ebd590..48b817ba6d4e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4951,7 +4951,7 @@ enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_DISCARD_CNT = 0x6, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, MLXSW_REG_PPCNT_TC_CNT = 0x11, - MLXSW_REG_PPCNT_TC_CONG_TC = 0x13, + MLXSW_REG_PPCNT_TC_CONG_CNT = 0x13, }; /* reg_ppcnt_grp @@ -5371,7 +5371,7 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64); -/* Ethernet Per Traffic Group Counters */ +/* Ethernet Per Traffic Class Counters */ /* reg_ppcnt_tc_transmit_queue * Contains the transmit queue depth in cells of traffic class @@ -5398,6 +5398,12 @@ MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, MLXSW_ITEM64(reg, ppcnt, wred_discard, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64); +/* reg_ppcnt_ecn_marked_tc + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); + static inline void mlxsw_reg_ppcnt_pack(char *payload, 
u8 local_port, enum mlxsw_reg_ppcnt_grp grp, u8 prio_tc) @@ -5681,6 +5687,14 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN); +/* reg_pmaos_rst + * Module reset toggle. + * Note: Setting reset while module is plugged-in will result in transition to + * "initializing" operational state. + * Access: OP + */ +MLXSW_ITEM32(reg, pmaos, rst, 0x00, 31, 1); + /* reg_pmaos_slot_index * Slot index. * Access: Index @@ -5693,6 +5707,24 @@ MLXSW_ITEM32(reg, pmaos, slot_index, 0x00, 24, 4); */ MLXSW_ITEM32(reg, pmaos, module, 0x00, 16, 8); +enum mlxsw_reg_pmaos_admin_status { + MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED = 1, + MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED = 2, + /* If the module is active and then unplugged, or experienced an error + * event, the operational status should go to "disabled" and can only + * be enabled upon explicit enable command. + */ + MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED_ONCE = 3, +}; + +/* reg_pmaos_admin_status + * Module administrative state (the desired state of the module). + * Note: To disable a module, all ports associated with the port must be + * administatively down first. + * Access: RW + */ +MLXSW_ITEM32(reg, pmaos, admin_status, 0x00, 8, 4); + /* reg_pmaos_ase * Admin state update enable. * If this bit is set, admin state will be updated based on admin_state field. @@ -5721,13 +5753,10 @@ enum mlxsw_reg_pmaos_e { */ MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2); -static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module, - enum mlxsw_reg_pmaos_e e) +static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module) { MLXSW_REG_ZERO(pmaos, payload); mlxsw_reg_pmaos_module_set(payload, module); - mlxsw_reg_pmaos_e_set(payload, e); - mlxsw_reg_pmaos_ee_set(payload, true); } /* PPLR - Port Physical Loopback Register @@ -5766,6 +5795,69 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port, MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0); } +/* PMTDB - Port Module To local DataBase Register + * ---------------------------------------------- + * The PMTDB register allows to query the possible module<->local port + * mapping than can be used in PMLP. It does not represent the actual/current + * mapping of the local to module. Actual mapping is only defined by PMLP. + */ +#define MLXSW_REG_PMTDB_ID 0x501A +#define MLXSW_REG_PMTDB_LEN 0x40 + +MLXSW_REG_DEFINE(pmtdb, MLXSW_REG_PMTDB_ID, MLXSW_REG_PMTDB_LEN); + +/* reg_pmtdb_slot_index + * Slot index (0: Main board). + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, slot_index, 0x00, 24, 4); + +/* reg_pmtdb_module + * Module number. + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, module, 0x00, 16, 8); + +/* reg_pmtdb_ports_width + * Port's width + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, ports_width, 0x00, 12, 4); + +/* reg_pmtdb_num_ports + * Number of ports in a single module (split/breakout) + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, num_ports, 0x00, 8, 4); + +enum mlxsw_reg_pmtdb_status { + MLXSW_REG_PMTDB_STATUS_SUCCESS, +}; + +/* reg_pmtdb_status + * Status + * Access: RO + */ +MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4); + +/* reg_pmtdb_port_num + * The local_port value which can be assigned to the module. + * In case of more than one port, port<x> represent the /<x> port of + * the module. 
+ * Access: RO + */ +MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false); + +static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module, + u8 ports_width, u8 num_ports) +{ + MLXSW_REG_ZERO(pmtdb, payload); + mlxsw_reg_pmtdb_slot_index_set(payload, slot_index); + mlxsw_reg_pmtdb_module_set(payload, module); + mlxsw_reg_pmtdb_ports_width_set(payload, ports_width); + mlxsw_reg_pmtdb_num_ports_set(payload, num_ports); +} + /* PMPE - Port Module Plug/Unplug Event Register * --------------------------------------------- * This register reports any operational status change of a module. @@ -5860,67 +5952,100 @@ static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port, mlxsw_reg_pddr_page_select_set(payload, page_select); } -/* PMTM - Port Module Type Mapping Register - * ---------------------------------------- - * The PMTM allows query or configuration of module types. +/* PMMP - Port Module Memory Map Properties Register + * ------------------------------------------------- + * The PMMP register allows to override the module memory map advertisement. + * The register can only be set when the module is disabled by PMAOS register. */ -#define MLXSW_REG_PMTM_ID 0x5067 -#define MLXSW_REG_PMTM_LEN 0x10 +#define MLXSW_REG_PMMP_ID 0x5044 +#define MLXSW_REG_PMMP_LEN 0x2C -MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN); +MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN); -/* reg_pmtm_module +/* reg_pmmp_module * Module number. * Access: Index */ -MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8); +MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8); + +/* reg_pmmp_sticky + * When set, will keep eeprom_override values after plug-out event. + * Access: OP + */ +MLXSW_ITEM32(reg, pmmp, sticky, 0x00, 0, 1); -enum mlxsw_reg_pmtm_module_type { - /* Backplane with 4 lanes */ - MLXSW_REG_PMTM_MODULE_TYPE_BP_4X, - /* QSFP */ - MLXSW_REG_PMTM_MODULE_TYPE_QSFP, - /* SFP */ - MLXSW_REG_PMTM_MODULE_TYPE_SFP, - /* Backplane with single lane */ - MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4, - /* Backplane with two lane */ - MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8, - /* Chip2Chip4x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C4X = 10, - /* Chip2Chip2x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C2X, - /* Chip2Chip1x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C1X, - /* QSFP-DD */ - MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14, - /* OSFP */ - MLXSW_REG_PMTM_MODULE_TYPE_OSFP, - /* SFP-DD */ - MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD, - /* DSFP */ - MLXSW_REG_PMTM_MODULE_TYPE_DSFP, - /* Chip2Chip8x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C8X, +/* reg_pmmp_eeprom_override_mask + * Write mask bit (negative polarity). + * 0 - Allow write + * 1 - Ignore write + * On write, indicates which of the bits from eeprom_override field are + * updated. + * Access: WO + */ +MLXSW_ITEM32(reg, pmmp, eeprom_override_mask, 0x04, 16, 16); + +enum { + /* Set module to low power mode */ + MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK = BIT(8), }; -/* reg_pmtm_module_type - * Module type. 
+/* reg_pmmp_eeprom_override + * Override / ignore EEPROM advertisement properties bitmask * Access: RW */ -MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4); +MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16); -static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module) +static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module) { - MLXSW_REG_ZERO(pmtm, payload); - mlxsw_reg_pmtm_module_set(payload, module); + MLXSW_REG_ZERO(pmmp, payload); + mlxsw_reg_pmmp_module_set(payload, module); } -static inline void -mlxsw_reg_pmtm_unpack(char *payload, - enum mlxsw_reg_pmtm_module_type *module_type) +/* PLLP - Port Local port to Label Port mapping Register + * ----------------------------------------------------- + * The PLLP register returns the mapping from Local Port into Label Port. + */ +#define MLXSW_REG_PLLP_ID 0x504A +#define MLXSW_REG_PLLP_LEN 0x10 + +MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN); + +/* reg_pllp_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8); + +/* reg_pllp_label_port + * Front panel label of the port. + * Access: RO + */ +MLXSW_ITEM32(reg, pllp, label_port, 0x00, 0, 8); + +/* reg_pllp_split_num + * Label split mapping for local_port. + * Access: RO + */ +MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4); + +/* reg_pllp_slot_index + * Slot index (0: Main board). + * Access: RO + */ +MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4); + +static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(pllp, payload); + mlxsw_reg_pllp_local_port_set(payload, local_port); +} + +static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port, + u8 *split_num, u8 *slot_index) { - *module_type = mlxsw_reg_pmtm_module_type_get(payload); + *label_port = mlxsw_reg_pllp_label_port_get(payload); + *split_num = mlxsw_reg_pllp_split_num_get(payload); + *slot_index = mlxsw_reg_pllp_slot_index_get(payload); } /* HTGT - Host Trap Group Table @@ -6664,6 +6789,23 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload, mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip); } +static inline void +mlxsw_reg_ritr_loopback_ipip6_pack(char *payload, + enum mlxsw_reg_ritr_loopback_ipip_type ipip_type, + enum mlxsw_reg_ritr_loopback_ipip_options options, + u16 uvr_id, u16 underlay_rif, + const struct in6_addr *usip, u32 gre_key) +{ + enum mlxsw_reg_ritr_loopback_protocol protocol = + MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6; + + mlxsw_reg_ritr_loopback_protocol_set(payload, protocol); + mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options, + uvr_id, underlay_rif, gre_key); + mlxsw_reg_ritr_loopback_ipip_usip6_memcpy_to(payload, + (const char *)usip); +} + /* RTAR - Router TCAM Allocation Register * -------------------------------------- * This register is used for allocation of regions in the TCAM table. 
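/* Illustrative sketch only, not taken from this series: how the PMTDB
 * helpers defined above are meant to be consumed. The wrapper name
 * mlxsw_example_pmtdb_query and its debug print are hypothetical; the
 * register calls mirror the way mlxsw_sp_port_split() uses PMTDB further
 * down in this diff, and PLLP is consumed analogously via
 * mlxsw_sp_port_label_info_get() in spectrum.c.
 */
static int mlxsw_example_pmtdb_query(struct mlxsw_core *mlxsw_core,
				     u8 module, u8 module_width)
{
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int count = 2;	/* ask about a 2-way split as an example */
	int i, err;

	/* slot_index 0 is the main board. */
	mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, module, module_width / count, count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err)
		return err;

	/* Firmware reports whether this split layout is supported at all. */
	if (mlxsw_reg_pmtdb_status_get(pmtdb_pl) !=
	    MLXSW_REG_PMTDB_STATUS_SUCCESS)
		return -EINVAL;

	/* port_num[i] is the local port the i-th split port would occupy;
	 * the mapping only takes effect once programmed through PMLP.
	 */
	for (i = 0; i < count; i++)
		pr_debug("module %d split port %d -> local_port %u\n",
			 module, i, mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i));

	return 0;
}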
@@ -6932,6 +7074,12 @@ static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip) mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip); } +static inline void mlxsw_reg_ratr_ipip6_entry_pack(char *payload, u32 ipv6_ptr) +{ + mlxsw_reg_ratr_ipip_type_set(payload, MLXSW_REG_RATR_IPIP_TYPE_IPV6); + mlxsw_reg_ratr_ipip_ipv6_ptr_set(payload, ipv6_ptr); +} + static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index, bool counter_enable) { @@ -8117,19 +8265,71 @@ static inline void mlxsw_reg_rtdp_pack(char *payload, } static inline void -mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, - enum mlxsw_reg_rtdp_ipip_sip_check sip_check, - unsigned int type_check, bool gre_key_check, - u32 ipv4_usip, u32 expected_gre_key) +mlxsw_reg_rtdp_ipip_pack(char *payload, u16 irif, + enum mlxsw_reg_rtdp_ipip_sip_check sip_check, + unsigned int type_check, bool gre_key_check, + u32 expected_gre_key) { mlxsw_reg_rtdp_ipip_irif_set(payload, irif); mlxsw_reg_rtdp_ipip_sip_check_set(payload, sip_check); mlxsw_reg_rtdp_ipip_type_check_set(payload, type_check); mlxsw_reg_rtdp_ipip_gre_key_check_set(payload, gre_key_check); - mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip); mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key); } +static inline void +mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, + enum mlxsw_reg_rtdp_ipip_sip_check sip_check, + unsigned int type_check, bool gre_key_check, + u32 ipv4_usip, u32 expected_gre_key) +{ + mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check, + gre_key_check, expected_gre_key); + mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip); +} + +static inline void +mlxsw_reg_rtdp_ipip6_pack(char *payload, u16 irif, + enum mlxsw_reg_rtdp_ipip_sip_check sip_check, + unsigned int type_check, bool gre_key_check, + u32 ipv6_usip_ptr, u32 expected_gre_key) +{ + mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check, + gre_key_check, expected_gre_key); + mlxsw_reg_rtdp_ipip_ipv6_usip_ptr_set(payload, ipv6_usip_ptr); +} + +/* RIPS - Router IP version Six Register + * ------------------------------------- + * The RIPS register is used to store IPv6 addresses for use by the NVE and + * IPinIP + */ +#define MLXSW_REG_RIPS_ID 0x8021 +#define MLXSW_REG_RIPS_LEN 0x14 + +MLXSW_REG_DEFINE(rips, MLXSW_REG_RIPS_ID, MLXSW_REG_RIPS_LEN); + +/* reg_rips_index + * Index to IPv6 address. + * For Spectrum, the index is to the KVD linear. + * Access: Index + */ +MLXSW_ITEM32(reg, rips, index, 0x00, 0, 24); + +/* reg_rips_ipv6 + * IPv6 address + * Access: RW + */ +MLXSW_ITEM_BUF(reg, rips, ipv6, 0x04, 16); + +static inline void mlxsw_reg_rips_pack(char *payload, u32 index, + const struct in6_addr *ipv6) +{ + MLXSW_REG_ZERO(rips, payload); + mlxsw_reg_rips_index_set(payload, index); + mlxsw_reg_rips_ipv6_memcpy_to(payload, (const char *)ipv6); +} + /* RATRAD - Router Adjacency Table Activity Dump Register * ------------------------------------------------------ * The RATRAD register is used to dump and optionally clear activity bits of @@ -10208,6 +10408,39 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, MLXSW_REG_MLCR_DURATION_MAX : 0); } +/* MCION - Management Cable IO and Notifications Register + * ------------------------------------------------------ + * The MCION register is used to query transceiver modules' IO pins and other + * notifications. 
+ */ +#define MLXSW_REG_MCION_ID 0x9052 +#define MLXSW_REG_MCION_LEN 0x18 + +MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN); + +/* reg_mcion_module + * Module number. + * Access: Index + */ +MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8); + +enum { + MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0), + MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8), +}; + +/* reg_mcion_module_status_bits + * Module IO status as defined by SFF. + * Access: RO + */ +MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16); + +static inline void mlxsw_reg_mcion_pack(char *payload, u8 module) +{ + MLXSW_REG_ZERO(mcion, payload); + mlxsw_reg_mcion_module_set(payload, module); +} + /* MTPPS - Management Pulse Per Second Register * -------------------------------------------- * This register provides the device PPS capabilities, configure the PPS in and @@ -12200,9 +12433,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pspa), MLXSW_REG(pmaos), MLXSW_REG(pplr), + MLXSW_REG(pmtdb), MLXSW_REG(pmpe), MLXSW_REG(pddr), - MLXSW_REG(pmtm), + MLXSW_REG(pmmp), + MLXSW_REG(pllp), MLXSW_REG(htgt), MLXSW_REG(hpkt), MLXSW_REG(rgcr), @@ -12210,6 +12445,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rtar), MLXSW_REG(ratr), MLXSW_REG(rtdp), + MLXSW_REG(rips), MLXSW_REG(ratrad), MLXSW_REG(rdpm), MLXSW_REG(ricnt), @@ -12249,6 +12485,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mgir), MLXSW_REG(mrsr), MLXSW_REG(mlcr), + MLXSW_REG(mcion), MLXSW_REG(mtpps), MLXSW_REG(mtutc), MLXSW_REG(mpsc), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index a56c9e19a390..a1512be77867 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -25,9 +25,6 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_SYSTEM_PORT, MLXSW_RES_ID_MAX_LAG, MLXSW_RES_ID_MAX_LAG_MEMBERS, - MLXSW_RES_ID_LOCAL_PORTS_IN_1X, - MLXSW_RES_ID_LOCAL_PORTS_IN_2X, - MLXSW_RES_ID_LOCAL_PORTS_IN_4X, MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER, MLXSW_RES_ID_CELL_SIZE, MLXSW_RES_ID_MAX_HEADROOM_SIZE, @@ -84,9 +81,6 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502, [MLXSW_RES_ID_MAX_LAG] = 0x2520, [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, - [MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610, - [MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611, - [MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612, [MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */ [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */ [MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 250c5a24264d..d05850ff3a77 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -47,7 +47,7 @@ #define MLXSW_SP1_FWREV_MAJOR 13 #define MLXSW_SP1_FWREV_MINOR 2008 -#define MLXSW_SP1_FWREV_SUBMINOR 2406 +#define MLXSW_SP1_FWREV_SUBMINOR 3326 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -64,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { #define MLXSW_SP2_FWREV_MAJOR 29 #define MLXSW_SP2_FWREV_MINOR 2008 -#define MLXSW_SP2_FWREV_SUBMINOR 2406 +#define MLXSW_SP2_FWREV_SUBMINOR 3326 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = MLXSW_SP2_FWREV_MAJOR, @@ -79,7 +79,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { #define MLXSW_SP3_FWREV_MAJOR 30 
#define MLXSW_SP3_FWREV_MINOR 2008 -#define MLXSW_SP3_FWREV_SUBMINOR 2406 +#define MLXSW_SP3_FWREV_SUBMINOR 3326 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { .major = MLXSW_SP3_FWREV_MAJOR, @@ -351,12 +351,12 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); } -static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) +static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 swid) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pspa_pl[MLXSW_REG_PSPA_LEN]; - mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); + mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); } @@ -529,55 +529,80 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, port_mapping->module = module; port_mapping->width = width; + port_mapping->module_width = width; port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); return 0; } -static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) +static int +mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const struct mlxsw_sp_port_mapping *port_mapping) { - struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pmlp_pl[MLXSW_REG_PMLP_LEN]; - int i; + int i, err; + + mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module); - mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); for (i = 0; i < port_mapping->width; i++) { mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ } - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + goto err_pmlp_write; + return 0; + +err_pmlp_write: + mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module); + return err; } -static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) +static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 module) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pmlp_pl[MLXSW_REG_PMLP_LEN]; - mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, 0); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + mlxsw_env_module_port_unmap(mlxsw_sp->core, module); } static int mlxsw_sp_port_open(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int err; - err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); + err = mlxsw_env_module_port_up(mlxsw_sp->core, + mlxsw_sp_port->mapping.module); if (err) return err; + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); + if (err) + goto err_port_admin_status_set; netif_start_queue(dev); return 0; + +err_port_admin_status_set: + mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_sp_port->mapping.module); + return err; } static int mlxsw_sp_port_stop(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; netif_stop_queue(dev); - 
return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); + mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_sp_port->mapping.module); + return 0; } static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, @@ -649,7 +674,7 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); if (err) return err; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -799,12 +824,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev, for (i = 0; i < TC_MAX_QUEUE; i++) { err = mlxsw_sp_port_get_stats_raw(dev, - MLXSW_REG_PPCNT_TC_CONG_TC, + MLXSW_REG_PPCNT_TC_CONG_CNT, i, ppcnt_pl); - if (!err) - xstats->wred_drop[i] = - mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); + if (err) + goto tc_cnt; + + xstats->wred_drop[i] = + mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); + xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl); +tc_cnt: err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, i, ppcnt_pl); if (err) @@ -1010,6 +1039,8 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false); case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP: return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f); + case FLOW_BLOCK_BINDER_TYPE_RED_MARK: + return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f); default: return -EOPNOTSUPP; } @@ -1442,29 +1473,68 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl); } +static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 *port_number, + u8 *split_port_subnumber, + u8 *slot_index) +{ + char pllp_pl[MLXSW_REG_PLLP_LEN]; + int err; + + mlxsw_reg_pllp_pack(pllp_pl, local_port); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl); + if (err) + return err; + mlxsw_reg_pllp_unpack(pllp_pl, port_number, + split_port_subnumber, slot_index); + return 0; +} + static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 split_base_local_port, + bool split, struct mlxsw_sp_port_mapping *port_mapping) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - bool split = !!split_base_local_port; struct mlxsw_sp_port *mlxsw_sp_port; u32 lanes = port_mapping->width; + u8 split_port_subnumber; struct net_device *dev; + u8 port_number; + u8 slot_index; bool splittable; int err; + err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", + local_port); + return err; + } + + err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", + local_port); + goto err_port_swid_set; + } + + err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number, + &split_port_subnumber, &slot_index); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n", + local_port); + goto err_port_label_info_get; + } + splittable = lanes > 1 && !split; err = mlxsw_core_port_init(mlxsw_sp->core, local_port, - port_mapping->module + 1, split, - port_mapping->lane / lanes, - splittable, lanes, - mlxsw_sp->base_mac, + port_number, split, split_port_subnumber, + splittable, lanes, mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac)); if (err) { dev_err(mlxsw_sp->bus_info->dev, 
"Port %d: Failed to init core port\n", local_port); - return err; + goto err_core_port_init; } dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); @@ -1480,7 +1550,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_sp_port->local_port = local_port; mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; mlxsw_sp_port->split = split; - mlxsw_sp_port->split_base_local_port = split_base_local_port; mlxsw_sp_port->mapping = *port_mapping; mlxsw_sp_port->link.autoneg = 1; INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); @@ -1498,20 +1567,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, dev->netdev_ops = &mlxsw_sp_port_netdev_ops; dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; - err = mlxsw_sp_port_module_map(mlxsw_sp_port); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", - mlxsw_sp_port->local_port); - goto err_port_module_map; - } - - err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", - mlxsw_sp_port->local_port); - goto err_port_swid_set; - } - err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", @@ -1712,21 +1767,24 @@ err_max_speed_get: err_port_speed_by_width_set: err_port_system_port_mapping_set: err_dev_addr_init: - mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); -err_port_swid_set: - mlxsw_sp_port_module_unmap(mlxsw_sp_port); -err_port_module_map: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: free_netdev(dev); err_alloc_etherdev: mlxsw_core_port_fini(mlxsw_sp->core, local_port); +err_core_port_init: +err_port_label_info_get: + mlxsw_sp_port_swid_set(mlxsw_sp, local_port, + MLXSW_PORT_SWID_DISABLED_PORT); +err_port_swid_set: + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module); return err; } static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + u8 module = mlxsw_sp_port->mapping.module; cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); @@ -1742,12 +1800,13 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); mlxsw_sp_port_buffers_fini(mlxsw_sp_port); - mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); - mlxsw_sp_port_module_unmap(mlxsw_sp_port); free_percpu(mlxsw_sp_port->pcpu_stats); WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); free_netdev(mlxsw_sp_port->dev); mlxsw_core_port_fini(mlxsw_sp->core, local_port); + mlxsw_sp_port_swid_set(mlxsw_sp, local_port, + MLXSW_PORT_SWID_DISABLED_PORT); + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module); } static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) @@ -1789,8 +1848,15 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp_port); } +static bool mlxsw_sp_local_port_valid(u8 local_port) +{ + return local_port != MLXSW_PORT_CPU_PORT; +} + static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) { + if (!mlxsw_sp_local_port_valid(local_port)) + return false; return mlxsw_sp->ports[local_port] != NULL; } @@ -1827,7 +1893,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) port_mapping = mlxsw_sp->port_mapping[i]; if (!port_mapping) continue; - err = 
mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); + err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping); if (err) goto err_port_create; } @@ -1894,17 +1960,10 @@ static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->port_mapping); } -static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width) -{ - u8 offset = (local_port - 1) % max_width; - - return local_port - offset; -} - static int -mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, +mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_port_mapping *port_mapping, - unsigned int count, u8 offset) + unsigned int count, const char *pmtdb_pl) { struct mlxsw_sp_port_mapping split_port_mapping; int err, i; @@ -1912,8 +1971,13 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, split_port_mapping = *port_mapping; split_port_mapping.width /= count; for (i = 0; i < count; i++) { - err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, - base_port, &split_port_mapping); + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + + if (!mlxsw_sp_local_port_valid(s_local_port)) + continue; + + err = mlxsw_sp_port_create(mlxsw_sp, s_local_port, + true, &split_port_mapping); if (err) goto err_port_create; split_port_mapping.lane += split_port_mapping.width; @@ -1922,49 +1986,34 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, return 0; err_port_create: - for (i--; i >= 0; i--) - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); + for (i--; i >= 0; i--) { + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + + if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) + mlxsw_sp_port_remove(mlxsw_sp, s_local_port); + } return err; } static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, - u8 base_port, - unsigned int count, u8 offset) + unsigned int count, + const char *pmtdb_pl) { struct mlxsw_sp_port_mapping *port_mapping; int i; /* Go over original unsplit ports in the gap and recreate them. 
*/ - for (i = 0; i < count * offset; i++) { - port_mapping = mlxsw_sp->port_mapping[base_port + i]; - if (!port_mapping) + for (i = 0; i < count; i++) { + u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + + port_mapping = mlxsw_sp->port_mapping[local_port]; + if (!port_mapping || !mlxsw_sp_local_port_valid(local_port)) continue; - mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping); + mlxsw_sp_port_create(mlxsw_sp, local_port, + false, port_mapping); } } -static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, - unsigned int count, - unsigned int max_width) -{ - enum mlxsw_res_id local_ports_in_x_res_id; - int split_width = max_width / count; - - if (split_width == 1) - local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X; - else if (split_width == 2) - local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X; - else if (split_width == 4) - local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X; - else - return -EINVAL; - - if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id)) - return -EINVAL; - return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); -} - static struct mlxsw_sp_port * mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) { @@ -1980,9 +2029,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port_mapping port_mapping; struct mlxsw_sp_port *mlxsw_sp_port; - int max_width; - u8 base_port; - int offset; + enum mlxsw_reg_pmtdb_status status; + char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; int i; int err; @@ -1994,57 +2042,37 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, return -EINVAL; } - max_width = mlxsw_core_module_max_width(mlxsw_core, - mlxsw_sp_port->mapping.module); - if (max_width < 0) { - netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); - return max_width; + if (mlxsw_sp_port->split) { + NL_SET_ERR_MSG_MOD(extack, "Port is already split"); + return -EINVAL; } - /* Split port with non-max cannot be split. */ - if (mlxsw_sp_port->mapping.width != max_width) { - netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n"); - NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); - return -EINVAL; + mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, + mlxsw_sp_port->mapping.module_width / count, + count); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); + return err; } - offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); - if (offset < 0) { - netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); + status = mlxsw_reg_pmtdb_status_get(pmtdb_pl); + if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration"); return -EINVAL; } - /* Only in case max split is being done, the local port and - * base port may differ. - */ - base_port = count == max_width ? - mlxsw_sp_cluster_base_port_get(local_port, max_width) : - local_port; + port_mapping = mlxsw_sp_port->mapping; - for (i = 0; i < count * offset; i++) { - /* Expect base port to exist and also the one in the middle in - * case of maximal split count. 
- */ - if (i == 0 || (count == max_width && i == count / 2)) - continue; + for (i = 0; i < count; i++) { + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { - netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); - NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); - return -EINVAL; - } + if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) + mlxsw_sp_port_remove(mlxsw_sp, s_local_port); } - port_mapping = mlxsw_sp_port->mapping; - - for (i = 0; i < count; i++) - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); - - err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, - count, offset); + err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping, + count, pmtdb_pl); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); goto err_port_split_create; @@ -2053,7 +2081,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, return 0; err_port_split_create: - mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); + mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); return err; } @@ -2062,11 +2090,10 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; + char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; unsigned int count; - int max_width; - u8 base_port; - int offset; int i; + int err; mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); if (!mlxsw_sp_port) { @@ -2077,35 +2104,30 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, } if (!mlxsw_sp_port->split) { - netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); NL_SET_ERR_MSG_MOD(extack, "Port was not split"); return -EINVAL; } - max_width = mlxsw_core_module_max_width(mlxsw_core, - mlxsw_sp_port->mapping.module); - if (max_width < 0) { - netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); - return max_width; - } + count = mlxsw_sp_port->mapping.module_width / + mlxsw_sp_port->mapping.width; - count = max_width / mlxsw_sp_port->mapping.width; - - offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); - if (WARN_ON(offset < 0)) { - netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); - return -EINVAL; + mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, + mlxsw_sp_port->mapping.module_width / count, + count); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); + return err; } - base_port = mlxsw_sp_port->split_base_local_port; + for (i = 0; i < count; i++) { + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); - for (i = 0; i < count; i++) - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); + if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) + mlxsw_sp_port_remove(mlxsw_sp, s_local_port); + } - mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); + mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3a43cba6d23c..3ab57e98cad2 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -144,7 +144,8 @@ struct mlxsw_sp_mall_entry; struct mlxsw_sp_port_mapping { u8 module; - u8 width; + u8 width; /* Number of lanes used by the port */ + u8 module_width; /* Number of lanes in the module (static) */ u8 lane; }; @@ -284,6 +285,7 @@ struct mlxsw_sp_port_vlan { /* No need an internal lock; At worse - miss a single periodic iteration */ struct mlxsw_sp_port_xstats { u64 ecn; + u64 tc_ecn[TC_MAX_QUEUE]; u64 wred_drop[TC_MAX_QUEUE]; u64 tail_drop[TC_MAX_QUEUE]; u64 backlog[TC_MAX_QUEUE]; @@ -345,7 +347,6 @@ struct mlxsw_sp_port { u16 egr_types; struct mlxsw_sp_ptp_port_stats stats; } ptp; - u8 split_base_local_port; int max_mtu; u32 max_speed; struct mlxsw_sp_hdroom *hdroom; @@ -747,6 +748,7 @@ enum mlxsw_sp_kvdl_entry_type { MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, }; @@ -758,6 +760,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: + case MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS: case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: default: return 1; @@ -1193,6 +1196,8 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_fifo_qopt_offload *p); int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port, struct flow_block_offload *f); +int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f); /* spectrum_fid.c */ bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c index 3a73d654017f..10ae1115de6c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c @@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = { MAX_KVD_ACTION_SETS), MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE), + MLXSW_SP2_KVDL_PART_INFO(IPV6_ADDRESS, 0x28, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE), }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 9de160e740b2..d78cf5a7220a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -1583,7 +1583,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; - unsigned long cb_priv; + unsigned long cb_priv = 0; LIST_HEAD(bulk_list); char *sbsr_pl; u8 masked_count; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 267590a0eee7..84d4460f3dcd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -96,6 +96,9 @@ mlxsw_sp_link_ext_state_opcode_map[] = { {1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0}, {1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0}, + + {1042, ETHTOOL_LINK_EXT_STATE_MODULE, + ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY}, }; static void @@ -124,6 
+127,10 @@ mlxsw_sp_port_set_link_ext_state(struct mlxsw_sp_ethtool_link_ext_state_opcode_m link_ext_state_info->cable_issue = link_ext_state_mapping.link_ext_substate; break; + case ETHTOOL_LINK_EXT_STATE_MODULE: + link_ext_state_info->module = + link_ext_state_mapping.link_ext_substate; + break; default: break; } @@ -1197,6 +1204,41 @@ mlxsw_sp_get_rmon_stats(struct net_device *dev, *ranges = mlxsw_rmon_ranges; } +static int mlxsw_sp_reset(struct net_device *dev, u32 *flags) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 module = mlxsw_sp_port->mapping.module; + + return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags); +} + +static int +mlxsw_sp_get_module_power_mode(struct net_device *dev, + struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 module = mlxsw_sp_port->mapping.module; + + return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params, + extack); +} + +static int +mlxsw_sp_set_module_power_mode(struct net_device *dev, + const struct ethtool_module_power_mode_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 module = mlxsw_sp_port->mapping.module; + + return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module, + params->policy, extack); +} + const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .cap_link_lanes_supported = true, .get_drvinfo = mlxsw_sp_port_get_drvinfo, @@ -1218,6 +1260,9 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats, .get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats, .get_rmon_stats = mlxsw_sp_get_rmon_stats, + .reset = mlxsw_sp_reset, + .get_module_power_mode = mlxsw_sp_get_module_power_mode, + .set_module_power_mode = mlxsw_sp_set_module_power_mode, }; struct mlxsw_sp1_port_link_mode { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 5facabd86882..ad3926de88f2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -24,50 +24,72 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev) return tun->parms; } -static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms) { - return !!(parms.i_flags & TUNNEL_KEY); + return !!(parms->i_flags & TUNNEL_KEY); } -static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms) { - return !!(parms.o_flags & TUNNEL_KEY); + return !!(parms->i_flags & TUNNEL_KEY); } -static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms) +{ + return !!(parms->o_flags & TUNNEL_KEY); +} + +static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms) +{ + return !!(parms->o_flags & TUNNEL_KEY); +} + +static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms) { return mlxsw_sp_ipip_parms4_has_ikey(parms) ? 
- be32_to_cpu(parms.i_key) : 0; + be32_to_cpu(parms->i_key) : 0; } -static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms) +static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms) +{ + return mlxsw_sp_ipip_parms6_has_ikey(parms) ? + be32_to_cpu(parms->i_key) : 0; +} + +static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms) { return mlxsw_sp_ipip_parms4_has_okey(parms) ? - be32_to_cpu(parms.o_key) : 0; + be32_to_cpu(parms->o_key) : 0; +} + +static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms) +{ + return mlxsw_sp_ipip_parms6_has_okey(parms) ? + be32_to_cpu(parms->o_key) : 0; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms) +mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr }; + return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms) +mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr }; + return (union mlxsw_sp_l3addr) { .addr6 = parms->laddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms) +mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr }; + return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms) +mlxsw_sp_ipip_parms6_daddr(const struct __ip6_tnl_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr }; + return (union mlxsw_sp_l3addr) { .addr6 = parms->raddr }; } union mlxsw_sp_l3addr @@ -80,10 +102,10 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - return mlxsw_sp_ipip_parms4_saddr(parms4); + return mlxsw_sp_ipip_parms4_saddr(&parms4); case MLXSW_SP_L3_PROTO_IPV6: parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev); - return mlxsw_sp_ipip_parms6_saddr(parms6); + return mlxsw_sp_ipip_parms6_saddr(&parms6); } WARN_ON(1); @@ -95,7 +117,7 @@ static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - return mlxsw_sp_ipip_parms4_daddr(parms4).addr4; + return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4; } static union mlxsw_sp_l3addr @@ -108,10 +130,10 @@ mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - return mlxsw_sp_ipip_parms4_daddr(parms4); + return mlxsw_sp_ipip_parms4_daddr(&parms4); case MLXSW_SP_L3_PROTO_IPV6: parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev); - return mlxsw_sp_ipip_parms6_daddr(parms6); + return mlxsw_sp_ipip_parms6_daddr(&parms6); } WARN_ON(1); @@ -125,6 +147,21 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr) return !memcmp(&addr, &naddr, sizeof(naddr)); } +static struct mlxsw_sp_ipip_parms +mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev) +{ + struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); + + return (struct mlxsw_sp_ipip_parms) { + .proto = MLXSW_SP_L3_PROTO_IPV4, + .saddr = mlxsw_sp_ipip_parms4_saddr(&parms), + .daddr = mlxsw_sp_ipip_parms4_daddr(&parms), + .link = parms.link, + .ikey = mlxsw_sp_ipip_parms4_ikey(&parms), + .okey = mlxsw_sp_ipip_parms4_okey(&parms), + }; +} + 
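/* Illustrative sketch, hypothetical helper not part of this series: with the
 * conversion to the address-family agnostic struct mlxsw_sp_ipip_parms,
 * GRE4 and GRE6 tunnel state can be inspected through the same fields, e.g.
 * by passing ipip_entry->parms. The function below only demonstrates that
 * callers branch on parms->proto instead of on per-family parm structs.
 */
static void mlxsw_sp_ipip_parms_dbg_print(struct net_device *ol_dev,
					  const struct mlxsw_sp_ipip_parms *parms)
{
	if (parms->proto == MLXSW_SP_L3_PROTO_IPV4)
		netdev_dbg(ol_dev, "saddr %pI4 daddr %pI4 ikey %u okey %u link %d\n",
			   &parms->saddr.addr4, &parms->daddr.addr4,
			   parms->ikey, parms->okey, parms->link);
	else
		netdev_dbg(ol_dev, "saddr %pI6c daddr %pI6c ikey %u okey %u link %d\n",
			   &parms->saddr.addr6, &parms->daddr.addr6,
			   parms->ikey, parms->okey, parms->link);
}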
static int mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index, struct mlxsw_sp_ipip_entry *ipip_entry, @@ -158,8 +195,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp, u32 ikey; parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev); - has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms); - ikey = mlxsw_sp_ipip_parms4_ikey(parms); + has_ikey = mlxsw_sp_ipip_parms4_has_ikey(&parms); + ikey = mlxsw_sp_ipip_parms4_ikey(&parms); mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id); @@ -218,12 +255,12 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; - lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ? + lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ? MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP : MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP; return (struct mlxsw_sp_rif_ipip_lb_config){ .lb_ipipt = lb_ipipt, - .okey = mlxsw_sp_ipip_parms4_okey(parms), + .okey = mlxsw_sp_ipip_parms4_okey(&parms), .ul_protocol = MLXSW_SP_L3_PROTO_IPV4, .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4, ol_dev), @@ -231,48 +268,39 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, } static int -mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_ipip_entry *ipip_entry, - struct netlink_ext_ack *extack) +mlxsw_sp_ipip_ol_netdev_change_gre(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + const struct mlxsw_sp_ipip_parms *new_parms, + struct netlink_ext_ack *extack) { - union mlxsw_sp_l3addr old_saddr, new_saddr; - union mlxsw_sp_l3addr old_daddr, new_daddr; - struct ip_tunnel_parm new_parms; + const struct mlxsw_sp_ipip_parms *old_parms = &ipip_entry->parms; bool update_tunnel = false; bool update_decap = false; bool update_nhs = false; int err = 0; - new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev); - - new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms); - old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4); - new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms); - old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4); - - if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) { + if (!mlxsw_sp_l3addr_eq(&new_parms->saddr, &old_parms->saddr)) { u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev); /* Since the local address has changed, if there is another * tunnel with a matching saddr, both need to be demoted. 
*/ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, - MLXSW_SP_L3_PROTO_IPV4, - new_saddr, ul_tb_id, + new_parms->proto, + new_parms->saddr, + ul_tb_id, ipip_entry)) { mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); return 0; } update_tunnel = true; - } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) != - mlxsw_sp_ipip_parms4_okey(new_parms)) || - ipip_entry->parms4.link != new_parms.link) { + } else if (old_parms->okey != new_parms->okey || + old_parms->link != new_parms->link) { update_tunnel = true; - } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) { + } else if (!mlxsw_sp_l3addr_eq(&new_parms->daddr, &old_parms->daddr)) { update_nhs = true; - } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) != - mlxsw_sp_ipip_parms4_ikey(new_parms)) { + } else if (old_parms->ikey != new_parms->ikey) { update_decap = true; } @@ -288,23 +316,308 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, false, false, false, extack); + if (err) + return err; - ipip_entry->parms4 = new_parms; - return err; + ipip_entry->parms = *new_parms; + return 0; +} + +static int +mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_ipip_parms new_parms; + + new_parms = mlxsw_sp_ipip_netdev_parms_init_gre4(ipip_entry->ol_dev); + return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry, + &new_parms, extack); +} + +static int +mlxsw_sp_ipip_rem_addr_set_gre4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + return 0; +} + +static void +mlxsw_sp_ipip_rem_addr_unset_gre4(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry) +{ } static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = { .dev_type = ARPHRD_IPGRE, .ul_proto = MLXSW_SP_L3_PROTO_IPV4, + .inc_parsing_depth = false, + .parms_init = mlxsw_sp_ipip_netdev_parms_init_gre4, .nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4, .decap_config = mlxsw_sp_ipip_decap_config_gre4, .can_offload = mlxsw_sp_ipip_can_offload_gre4, .ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4, .ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4, + .rem_ip_addr_set = mlxsw_sp_ipip_rem_addr_set_gre4, + .rem_ip_addr_unset = mlxsw_sp_ipip_rem_addr_unset_gre4, }; -const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = { +static struct mlxsw_sp_ipip_parms +mlxsw_sp1_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev) +{ + struct mlxsw_sp_ipip_parms parms = {0}; + + WARN_ON_ONCE(1); + return parms; +} + +static int +mlxsw_sp1_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_ipip_entry *ipip_entry, + bool force, char *ratr_pl) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static int +mlxsw_sp1_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + u32 tunnel_index) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static bool mlxsw_sp1_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + return false; +} + +static struct mlxsw_sp_rif_ipip_lb_config +mlxsw_sp1_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + struct mlxsw_sp_rif_ipip_lb_config config = {0}; + + WARN_ON_ONCE(1); + return config; +} + +static int +mlxsw_sp1_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) 
+{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static int +mlxsw_sp1_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static void +mlxsw_sp1_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry) +{ + WARN_ON_ONCE(1); +} + +static const struct mlxsw_sp_ipip_ops mlxsw_sp1_ipip_gre6_ops = { + .dev_type = ARPHRD_IP6GRE, + .ul_proto = MLXSW_SP_L3_PROTO_IPV6, + .inc_parsing_depth = true, + .parms_init = mlxsw_sp1_ipip_netdev_parms_init_gre6, + .nexthop_update = mlxsw_sp1_ipip_nexthop_update_gre6, + .decap_config = mlxsw_sp1_ipip_decap_config_gre6, + .can_offload = mlxsw_sp1_ipip_can_offload_gre6, + .ol_loopback_config = mlxsw_sp1_ipip_ol_loopback_config_gre6, + .ol_netdev_change = mlxsw_sp1_ipip_ol_netdev_change_gre6, + .rem_ip_addr_set = mlxsw_sp1_ipip_rem_addr_set_gre6, + .rem_ip_addr_unset = mlxsw_sp1_ipip_rem_addr_unset_gre6, +}; + +const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[] = { [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops, + [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp1_ipip_gre6_ops, +}; + +static struct mlxsw_sp_ipip_parms +mlxsw_sp2_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev) +{ + struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev); + + return (struct mlxsw_sp_ipip_parms) { + .proto = MLXSW_SP_L3_PROTO_IPV6, + .saddr = mlxsw_sp_ipip_parms6_saddr(&parms), + .daddr = mlxsw_sp_ipip_parms6_daddr(&parms), + .link = parms.link, + .ikey = mlxsw_sp_ipip_parms6_ikey(&parms), + .okey = mlxsw_sp_ipip_parms6_okey(&parms), + }; +} + +static int +mlxsw_sp2_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_ipip_entry *ipip_entry, + bool force, char *ratr_pl) +{ + u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); + enum mlxsw_reg_ratr_op op; + + op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY : + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY; + mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP, + adj_index, rif_index); + mlxsw_reg_ratr_ipip6_entry_pack(ratr_pl, + ipip_entry->dip_kvdl_index); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); +} + +static int +mlxsw_sp2_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + u32 tunnel_index) +{ + u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); + u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb); + char rtdp_pl[MLXSW_REG_RTDP_LEN]; + struct __ip6_tnl_parm parms; + unsigned int type_check; + bool has_ikey; + u32 ikey; + + parms = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); + has_ikey = mlxsw_sp_ipip_parms6_has_ikey(&parms); + ikey = mlxsw_sp_ipip_parms6_ikey(&parms); + + mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); + mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id); + + type_check = has_ikey ? + MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY : + MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE; + + /* Linux demuxes tunnels based on packet SIP (which must match tunnel + * remote IP). Thus configure decap so that it filters out packets that + * are not IPv6 or have the wrong SIP. IPIP_DECAP_ERROR trap is + * generated for packets that fail this criterion. Linux then handles + * such packets in slow path and generates ICMP destination unreachable. 
+ */ + mlxsw_reg_rtdp_ipip6_pack(rtdp_pl, rif_index, + MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6, + type_check, has_ikey, + ipip_entry->dip_kvdl_index, ikey); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl); +} + +static bool mlxsw_sp2_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev); + bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS; + bool inherit_ttl = tparm.hop_limit == 0; + __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */ + + return (tparm.i_flags & ~okflags) == 0 && + (tparm.o_flags & ~okflags) == 0 && + inherit_ttl && inherit_tos && + mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev); +} + +static struct mlxsw_sp_rif_ipip_lb_config +mlxsw_sp2_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev); + enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; + + lb_ipipt = mlxsw_sp_ipip_parms6_has_okey(&parms) ? + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP : + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP; + return (struct mlxsw_sp_rif_ipip_lb_config){ + .lb_ipipt = lb_ipipt, + .okey = mlxsw_sp_ipip_parms6_okey(&parms), + .ul_protocol = MLXSW_SP_L3_PROTO_IPV6, + .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV6, + ol_dev), + }; +} + +static int +mlxsw_sp2_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_ipip_parms new_parms; + + new_parms = mlxsw_sp2_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev); + return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry, + &new_parms, extack); +} + +static int +mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + char rips_pl[MLXSW_REG_RIPS_LEN]; + struct __ip6_tnl_parm parms6; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + &ipip_entry->dip_kvdl_index); + if (err) + return err; + + parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); + mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index, + &parms6.raddr); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); + if (err) + goto err_rips_write; + + return 0; + +err_rips_write: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + ipip_entry->dip_kvdl_index); + return err; +} + +static void +mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry) +{ + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + ipip_entry->dip_kvdl_index); +} + +static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = { + .dev_type = ARPHRD_IP6GRE, + .ul_proto = MLXSW_SP_L3_PROTO_IPV6, + .inc_parsing_depth = true, + .parms_init = mlxsw_sp2_ipip_netdev_parms_init_gre6, + .nexthop_update = mlxsw_sp2_ipip_nexthop_update_gre6, + .decap_config = mlxsw_sp2_ipip_decap_config_gre6, + .can_offload = mlxsw_sp2_ipip_can_offload_gre6, + .ol_loopback_config = mlxsw_sp2_ipip_ol_loopback_config_gre6, + .ol_netdev_change = mlxsw_sp2_ipip_ol_netdev_change_gre6, + .rem_ip_addr_set = mlxsw_sp2_ipip_rem_addr_set_gre6, + .rem_ip_addr_unset = mlxsw_sp2_ipip_rem_addr_unset_gre6, +}; + +const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[] = { + [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops, + 
[MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp2_ipip_gre6_ops, }; static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp, @@ -363,3 +676,22 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp) return 0; } + +struct net_device * +mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev) +{ + struct net *net = dev_net(ol_dev); + struct ip_tunnel *tun4; + struct ip6_tnl *tun6; + + switch (ol_dev->type) { + case ARPHRD_IPGRE: + tun4 = netdev_priv(ol_dev); + return dev_get_by_index_rcu(net, tun4->parms.link); + case ARPHRD_IP6GRE: + tun6 = netdev_priv(ol_dev); + return dev_get_by_index_rcu(net, tun6->parms.link); + default: + return NULL; + } +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index f0837b42d1d6..8cc259dcc8d0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -7,6 +7,7 @@ #include "spectrum_router.h" #include <net/ip_fib.h> #include <linux/if_tunnel.h> +#include <net/ip6_tunnel.h> struct ip_tunnel_parm mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev); @@ -21,23 +22,36 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr); enum mlxsw_sp_ipip_type { MLXSW_SP_IPIP_TYPE_GRE4, + MLXSW_SP_IPIP_TYPE_GRE6, MLXSW_SP_IPIP_TYPE_MAX, }; +struct mlxsw_sp_ipip_parms { + enum mlxsw_sp_l3proto proto; + union mlxsw_sp_l3addr saddr; + union mlxsw_sp_l3addr daddr; + int link; + u32 ikey; + u32 okey; +}; + struct mlxsw_sp_ipip_entry { enum mlxsw_sp_ipip_type ipipt; struct net_device *ol_dev; /* Overlay. */ struct mlxsw_sp_rif_ipip_lb *ol_lb; struct mlxsw_sp_fib_entry *decap_fib_entry; struct list_head ipip_list_node; - union { - struct ip_tunnel_parm parms4; - }; + struct mlxsw_sp_ipip_parms parms; + u32 dip_kvdl_index; }; struct mlxsw_sp_ipip_ops { int dev_type; enum mlxsw_sp_l3proto ul_proto; /* Underlay. 
*/ + bool inc_parsing_depth; + + struct mlxsw_sp_ipip_parms + (*parms_init)(const struct net_device *ol_dev); int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index, struct mlxsw_sp_ipip_entry *ipip_entry, @@ -58,8 +72,13 @@ struct mlxsw_sp_ipip_ops { int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry, struct netlink_ext_ack *extack); + int (*rem_ip_addr_set)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry); + void (*rem_ip_addr_unset)(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry); }; -extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[]; +extern const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[]; +extern const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[]; #endif /* _MLXSW_IPIP_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 9958d503bf0e..6d1431fa31d7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -519,6 +519,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_qdisc->prio_bitmap, &stats_base->tx_packets, &stats_base->tx_bytes); + red_base->prob_mark = xstats->tc_ecn[tclass_num]; red_base->prob_drop = xstats->wred_drop[tclass_num]; red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num); @@ -618,19 +619,22 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, int tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_port_xstats *xstats; struct red_stats *res = xstats_ptr; - int early_drops, pdrops; + int early_drops, marks, pdrops; xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; + marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark; pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - xstats_base->pdrop; res->pdrop += pdrops; res->prob_drop += early_drops; + res->prob_mark += marks; xstats_base->pdrop += pdrops; xstats_base->prob_drop += early_drops; + xstats_base->prob_mark += marks; return 0; } @@ -648,7 +652,8 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, stats_base = &mlxsw_sp_qdisc->stats_base; mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr); - overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits; + overlimits = xstats->wred_drop[tclass_num] + + xstats->tc_ecn[tclass_num] - stats_base->overlimits; stats_ptr->qstats->overlimits += overlimits; stats_base->overlimits += overlimits; @@ -1472,6 +1477,7 @@ struct mlxsw_sp_qevent_binding { u32 handle; int tclass_num; enum mlxsw_sp_span_trigger span_trigger; + unsigned int action_mask; }; static LIST_HEAD(mlxsw_sp_qevent_block_cb_list); @@ -1482,8 +1488,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_span_agent_parms *agent_parms, int *p_span_id) { + enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger; struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; struct mlxsw_sp_span_trigger_parms trigger_parms = {}; + bool ingress; int span_id; int err; @@ -1491,18 +1499,19 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp, if (err) return err; - err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true); + ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger); + err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress); if (err) goto err_analyzed_port_get; 
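The spectrum_qdisc hunks above extend the driver's existing base-and-delta counter scheme to ECN marks: the hardware tc_ecn counter is snapshotted into prob_mark when the stats base is (re)initialized, later reads report only the delta, and overlimits now counts marks as well as WRED early drops. A minimal sketch of that accounting, under assumed foo_* names (the real code lives in mlxsw_sp_qdisc_get_red_xstats()):

#include <linux/types.h>
#include <net/red.h>

struct foo_red_base {
	u64 prob_drop;	/* WRED early drops already reported */
	u64 prob_mark;	/* ECN marks already reported */
};

/* Fold the current hardware counters into the red_stats handed to tc,
 * then advance the base so the same events are never reported twice.
 */
static void foo_red_xstats_update(struct red_stats *res,
				  struct foo_red_base *base,
				  u64 hw_wred_drop, u64 hw_tc_ecn)
{
	u64 early_drops = hw_wred_drop - base->prob_drop;
	u64 marks = hw_tc_ecn - base->prob_mark;

	res->prob_drop += early_drops;
	res->prob_mark += marks;
	base->prob_drop += early_drops;
	base->prob_mark += marks;
}
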
trigger_parms.span_id = span_id; trigger_parms.probability_rate = 1; - err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, + err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port, &trigger_parms); if (err) goto err_agent_bind; - err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger, + err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger, qevent_binding->tclass_num); if (err) goto err_trigger_enable; @@ -1511,10 +1520,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp, return 0; err_trigger_enable: - mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, + mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port, &trigger_parms); err_agent_bind: - mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true); + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress); err_analyzed_port_get: mlxsw_sp_span_agent_put(mlxsw_sp, span_id); return err; @@ -1524,16 +1533,20 @@ static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_qevent_binding *qevent_binding, int span_id) { + enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger; struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; struct mlxsw_sp_span_trigger_parms trigger_parms = { .span_id = span_id, }; + bool ingress; - mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger, + ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger); + + mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger, qevent_binding->tclass_num); - mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, + mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port, &trigger_parms); - mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true); + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress); mlxsw_sp_span_agent_put(mlxsw_sp, span_id); } @@ -1583,10 +1596,17 @@ static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id); } -static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mall_entry *mall_entry, - struct mlxsw_sp_qevent_binding *qevent_binding) +static int +mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding, + struct netlink_ext_ack *extack) { + if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) { + NL_SET_ERR_MSG(extack, "Action not supported at this qevent"); + return -EOPNOTSUPP; + } + switch (mall_entry->type) { case MLXSW_SP_MALL_ACTION_TYPE_MIRROR: return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding); @@ -1614,15 +1634,17 @@ static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp, } } -static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block, - struct mlxsw_sp_qevent_binding *qevent_binding) +static int +mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block, + struct mlxsw_sp_qevent_binding *qevent_binding, + struct netlink_ext_ack *extack) { struct mlxsw_sp_mall_entry *mall_entry; int err; list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) { err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry, - qevent_binding); + qevent_binding, extack); if (err) goto err_entry_configure; } @@ -1646,13 +1668,17 @@ static void 
mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qe qevent_binding); } -static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block) +static int +mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block, + struct netlink_ext_ack *extack) { struct mlxsw_sp_qevent_binding *qevent_binding; int err; list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) { - err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding); + err = mlxsw_sp_qevent_binding_configure(qevent_block, + qevent_binding, + extack); if (err) goto err_binding_configure; } @@ -1737,7 +1763,7 @@ static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp, list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list); - err = mlxsw_sp_qevent_block_configure(qevent_block); + err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack); if (err) goto err_block_configure; @@ -1825,7 +1851,8 @@ static void mlxsw_sp_qevent_block_release(void *cb_priv) static struct mlxsw_sp_qevent_binding * mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num, - enum mlxsw_sp_span_trigger span_trigger) + enum mlxsw_sp_span_trigger span_trigger, + unsigned int action_mask) { struct mlxsw_sp_qevent_binding *binding; @@ -1837,6 +1864,7 @@ mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, binding->handle = handle; binding->tclass_num = tclass_num; binding->span_trigger = span_trigger; + binding->action_mask = action_mask; return binding; } @@ -1862,9 +1890,11 @@ mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block, return NULL; } -static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port, - struct flow_block_offload *f, - enum mlxsw_sp_span_trigger span_trigger) +static int +mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f, + enum mlxsw_sp_span_trigger span_trigger, + unsigned int action_mask) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_qevent_binding *qevent_binding; @@ -1904,14 +1934,18 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po goto err_binding_exists; } - qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle, - qdisc->tclass_num, span_trigger); + qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, + f->sch->handle, + qdisc->tclass_num, + span_trigger, + action_mask); if (IS_ERR(qevent_binding)) { err = PTR_ERR(qevent_binding); goto err_binding_create; } - err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding); + err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding, + f->extack); if (err) goto err_binding_configure; @@ -1963,15 +1997,19 @@ static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp } } -static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port, - struct flow_block_offload *f, - enum mlxsw_sp_span_trigger span_trigger) +static int +mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f, + enum mlxsw_sp_span_trigger span_trigger, + unsigned int action_mask) { f->driver_block_list = &mlxsw_sp_qevent_block_cb_list; switch (f->command) { case FLOW_BLOCK_BIND: - return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger); + return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, + span_trigger, + action_mask); case FLOW_BLOCK_UNBIND: 
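These qevent hunks thread a per-binding action_mask from bind time down to the point where each matchall action is offloaded, together with the extack, so an unsupported action is rejected with a readable error instead of being silently ignored. A condensed sketch of the gating check, with assumed foo_* names:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/netlink.h>

enum foo_mall_action_type {
	FOO_MALL_ACTION_TYPE_MIRROR,
	FOO_MALL_ACTION_TYPE_TRAP,
};

struct foo_qevent_binding {
	unsigned int action_mask;	/* BIT() per supported action type */
};

static int foo_qevent_entry_configure(struct foo_qevent_binding *binding,
				      enum foo_mall_action_type type,
				      struct netlink_ext_ack *extack)
{
	if (!(BIT(type) & binding->action_mask)) {
		NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
		return -EOPNOTSUPP;
	}

	/* proceed with the type-specific (mirror/trap) configuration */
	return 0;
}
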
mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger); return 0; @@ -1983,7 +2021,22 @@ static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port, int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port, struct flow_block_offload *f) { - return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP); + unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) | + BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP); + + return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, + MLXSW_SP_SPAN_TRIGGER_EARLY_DROP, + action_mask); +} + +int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f) +{ + unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR); + + return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, + MLXSW_SP_SPAN_TRIGGER_ECN, + action_mask); } int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 19bb3ca0515e..1e141b5944cd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -115,6 +115,7 @@ struct mlxsw_sp_rif_ops { struct mlxsw_sp_router_ops { int (*init)(struct mlxsw_sp *mlxsw_sp); + int (*ipips_init)(struct mlxsw_sp *mlxsw_sp); }; static struct mlxsw_sp_rif * @@ -1055,22 +1056,13 @@ static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->router->vrs); } -static struct net_device * -__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev) -{ - struct ip_tunnel *tun = netdev_priv(ol_dev); - struct net *net = dev_net(ol_dev); - - return dev_get_by_index_rcu(net, tun->parms.link); -} - u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev) { struct net_device *d; u32 tb_id; rcu_read_lock(); - d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); if (d) tb_id = l3mdev_fib_table(d) ? 
: RT_TABLE_MAIN; else @@ -1116,6 +1108,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_ipip_ops *ipip_ops; struct mlxsw_sp_ipip_entry *ipip_entry; struct mlxsw_sp_ipip_entry *ret = NULL; + int err; ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL); @@ -1131,26 +1124,30 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, ipip_entry->ipipt = ipipt; ipip_entry->ol_dev = ol_dev; + ipip_entry->parms = ipip_ops->parms_init(ol_dev); - switch (ipip_ops->ul_proto) { - case MLXSW_SP_L3_PROTO_IPV4: - ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - break; - case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON(1); - break; + err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry); + if (err) { + ret = ERR_PTR(err); + goto err_rem_ip_addr_set; } return ipip_entry; +err_rem_ip_addr_set: + mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); err_ol_ipip_lb_create: kfree(ipip_entry); return ret; } -static void -mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry) +static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) { + const struct mlxsw_sp_ipip_ops *ipip_ops = + mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; + + ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry); mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); kfree(ipip_entry); } @@ -1174,6 +1171,32 @@ mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_l3addr_eq(&tun_saddr, &saddr); } +static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt) +{ + const struct mlxsw_sp_ipip_ops *ipip_ops; + + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; + + /* Not all tunnels require to increase the default pasing depth + * (96 bytes). + */ + if (ipip_ops->inc_parsing_depth) + return mlxsw_sp_parsing_depth_inc(mlxsw_sp); + + return 0; +} + +static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt) +{ + const struct mlxsw_sp_ipip_ops *ipip_ops = + mlxsw_sp->router->ipip_ops_arr[ipipt]; + + if (ipip_ops->inc_parsing_depth) + mlxsw_sp_parsing_depth_dec(mlxsw_sp); +} + static int mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, @@ -1187,18 +1210,32 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp, if (err) return err; + err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp, + ipip_entry->ipipt); + if (err) + goto err_parsing_depth_inc; + ipip_entry->decap_fib_entry = fib_entry; fib_entry->decap.ipip_entry = ipip_entry; fib_entry->decap.tunnel_index = tunnel_index; + return 0; + +err_parsing_depth_inc: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + fib_entry->decap.tunnel_index); + return err; } static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry) { + enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt; + /* Unlink this node from the IPIP entry that it's the decap entry of. 
*/ fib_entry->decap.ipip_entry->decap_fib_entry = NULL; fib_entry->decap.ipip_entry = NULL; + mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt); mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, fib_entry->decap.tunnel_index); } @@ -1309,6 +1346,11 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp, saddr_len = 4; saddr_prefix_len = 32; break; + case MLXSW_SP_L3_PROTO_IPV6: + saddrp = &saddr.addr6; + saddr_len = 16; + saddr_prefix_len = 128; + break; default: WARN_ON(1); return NULL; @@ -1345,7 +1387,7 @@ mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry) { list_del(&ipip_entry->ipip_list_node); - mlxsw_sp_ipip_entry_dealloc(ipip_entry); + mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry); } static bool @@ -1450,7 +1492,7 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp, struct net_device *ipip_ul_dev; rcu_read_lock(); - ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); rcu_read_unlock(); if (ipip_ul_dev == ul_dev) @@ -1536,23 +1578,34 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id, u16 ul_rif_id, bool enable) { struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config; + enum mlxsw_reg_ritr_loopback_ipip_options ipip_options; struct mlxsw_sp_rif *rif = &lb_rif->common; struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; char ritr_pl[MLXSW_REG_RITR_LEN]; + struct in6_addr *saddr6; u32 saddr4; + ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET; switch (lb_cf.ul_protocol) { case MLXSW_SP_L3_PROTO_IPV4: saddr4 = be32_to_cpu(lb_cf.saddr.addr4); mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, rif->rif_index, rif->vr_id, rif->dev->mtu); mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt, - MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET, - ul_vr_id, ul_rif_id, saddr4, lb_cf.okey); + ipip_options, ul_vr_id, + ul_rif_id, saddr4, + lb_cf.okey); break; case MLXSW_SP_L3_PROTO_IPV6: - return -EAFNOSUPPORT; + saddr6 = &lb_cf.saddr.addr6; + mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, + rif->rif_index, rif->vr_id, rif->dev->mtu); + mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt, + ipip_options, ul_vr_id, + ul_rif_id, saddr6, + lb_cf.okey); + break; } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); @@ -1827,7 +1880,7 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp, struct net_device *ipip_ul_dev; rcu_read_lock(); - ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); rcu_read_unlock(); if (ipip_ul_dev == ul_dev) mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); @@ -4152,7 +4205,7 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev) bool is_up; rcu_read_lock(); - ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); is_up = ul_dev ? 
(ul_dev->flags & IFF_UP) : true; rcu_read_unlock(); @@ -4376,6 +4429,66 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, } } +static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp) +{ + enum mlxsw_reg_ratr_trap_action trap_action; + char ratr_pl[MLXSW_REG_RATR_LEN]; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + &mlxsw_sp->router->adj_trap_index); + if (err) + return err; + + trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP; + mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true, + MLXSW_REG_RATR_TYPE_ETHERNET, + mlxsw_sp->router->adj_trap_index, + mlxsw_sp->router->lb_rif_index); + mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action); + mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); + if (err) + goto err_ratr_write; + + return 0; + +err_ratr_write: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + mlxsw_sp->router->adj_trap_index); + return err; +} + +static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + mlxsw_sp->router->adj_trap_index); +} + +static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups)) + return 0; + + err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp); + if (err) + return err; + + refcount_set(&mlxsw_sp->router->num_groups, 1); + + return 0; +} + +static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp) +{ + if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups)) + return; + + mlxsw_sp_adj_trap_entry_fini(mlxsw_sp); +} + static void mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_nexthop_group *nh_grp, @@ -4790,6 +4903,9 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop_obj_init; } + err = mlxsw_sp_nexthop_group_inc(mlxsw_sp); + if (err) + goto err_group_inc; err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); if (err) { NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device"); @@ -4808,6 +4924,8 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, return 0; err_group_refresh: + mlxsw_sp_nexthop_group_dec(mlxsw_sp); +err_group_inc: i = nhgi->count; err_nexthop_obj_init: for (i--; i >= 0; i--) { @@ -4832,6 +4950,7 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp, cancel_delayed_work(&router->nh_grp_activity_dw); } + mlxsw_sp_nexthop_group_dec(mlxsw_sp); for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -5223,6 +5342,9 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop4_init; } + err = mlxsw_sp_nexthop_group_inc(mlxsw_sp); + if (err) + goto err_group_inc; err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); if (err) goto err_group_refresh; @@ -5230,6 +5352,8 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp, return 0; err_group_refresh: + mlxsw_sp_nexthop_group_dec(mlxsw_sp); +err_group_inc: i = nhgi->count; err_nexthop4_init: for (i--; i >= 0; i--) { @@ -5247,6 +5371,7 @@ mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; int i; + mlxsw_sp_nexthop_group_dec(mlxsw_sp); for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -5725,41 +5850,6 @@ static int mlxsw_sp_fib_entry_commit(struct 
mlxsw_sp *mlxsw_sp, return err; } -static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp) -{ - enum mlxsw_reg_ratr_trap_action trap_action; - char ratr_pl[MLXSW_REG_RATR_LEN]; - int err; - - if (mlxsw_sp->router->adj_discard_index_valid) - return 0; - - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, - &mlxsw_sp->router->adj_discard_index); - if (err) - return err; - - trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP; - mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true, - MLXSW_REG_RATR_TYPE_ETHERNET, - mlxsw_sp->router->adj_discard_index, - mlxsw_sp->router->lb_rif_index); - mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action); - mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); - if (err) - goto err_ratr_write; - - mlxsw_sp->router->adj_discard_index_valid = true; - - return 0; - -err_ratr_write: - mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, - mlxsw_sp->router->adj_discard_index); - return err; -} - static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, @@ -5772,7 +5862,6 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, u16 trap_id = 0; u32 adjacency_index = 0; u16 ecmp_size = 0; - int err; /* In case the nexthop group adjacency index is valid, use it * with provided ECMP size. Otherwise, setup trap and pass @@ -5783,11 +5872,8 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, adjacency_index = nhgi->adj_index; ecmp_size = nhgi->ecmp_size; } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) { - err = mlxsw_sp_adj_discard_write(mlxsw_sp); - if (err) - return err; trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; - adjacency_index = mlxsw_sp->router->adj_discard_index; + adjacency_index = mlxsw_sp->router->adj_trap_index; ecmp_size = 1; } else { trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; @@ -6036,8 +6122,8 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, } static void -mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: @@ -6048,6 +6134,13 @@ mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp, } } +static void +mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib4_entry *fib4_entry) +{ + mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common); +} + static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node, @@ -6108,7 +6201,7 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; fib_info_put(fib4_entry->fi); - mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common); + mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry); mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group, fib_node->fib); mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common); @@ -6641,6 +6734,9 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list); } nh_grp->nhgi = nhgi; + err = mlxsw_sp_nexthop_group_inc(mlxsw_sp); + if (err) + goto err_group_inc; err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); if (err) goto err_group_refresh; @@ -6648,6 
+6744,8 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp, return 0; err_group_refresh: + mlxsw_sp_nexthop_group_dec(mlxsw_sp); +err_group_inc: i = nhgi->count; err_nexthop6_init: for (i--; i >= 0; i--) { @@ -6665,6 +6763,7 @@ mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; int i; + mlxsw_sp_nexthop_group_dec(mlxsw_sp); for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -6888,11 +6987,38 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry); } -static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - const struct fib6_info *rt) +static int +mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + const struct fib6_info *rt) { - if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) + struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi; + union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr }; + int ifindex = nhgi->nexthops[0].ifindex; + struct mlxsw_sp_ipip_entry *ipip_entry; + + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex, + MLXSW_SP_L3_PROTO_IPV6, + dip); + + if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) { + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP; + return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry, + ipip_entry); + } + + return 0; +} + +static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + const struct fib6_info *rt) +{ + if (rt->fib6_flags & RTF_LOCAL) + return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry, + rt); + if (rt->fib6_flags & RTF_ANYCAST) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; else if (rt->fib6_type == RTN_BLACKHOLE) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; @@ -6902,6 +7028,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; else fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + + return 0; } static void @@ -6959,12 +7087,16 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop_group_vr_link; - mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]); + err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]); + if (err) + goto err_fib6_entry_type_set; fib_entry->fib_node = fib_node; return fib6_entry; +err_fib6_entry_type_set: + mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib); err_nexthop_group_vr_link: mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry); err_nexthop6_group_get: @@ -6983,11 +7115,19 @@ err_fib_entry_priv_create: return ERR_PTR(err); } +static void +mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common); +} + static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib6_entry *fib6_entry) { struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; + mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry); mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group, fib_node->fib); mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); @@ -7340,16 +7480,6 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) continue; mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); } - - 
/* After flushing all the routes, it is not possible anyone is still - * using the adjacency index that is discarding packets, so free it in - * case it was allocated. - */ - if (!mlxsw_sp->router->adj_discard_index_valid) - return; - mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, - mlxsw_sp->router->adj_discard_index); - mlxsw_sp->router->adj_discard_index_valid = false; } struct mlxsw_sp_fib6_event { @@ -9447,7 +9577,6 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) { int err; - mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp); @@ -9460,6 +9589,18 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) return mlxsw_sp_ipip_config_tigcr(mlxsw_sp); } +static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr; + return mlxsw_sp_ipips_init(mlxsw_sp); +} + +static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr; + return mlxsw_sp_ipips_init(mlxsw_sp); +} + static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) { WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list)); @@ -9874,6 +10015,7 @@ static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = { .init = mlxsw_sp1_router_init, + .ipips_init = mlxsw_sp1_ipips_init, }; static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp) @@ -9889,6 +10031,7 @@ static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = { .init = mlxsw_sp2_router_init, + .ipips_init = mlxsw_sp2_ipips_init, }; int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, @@ -9934,7 +10077,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rifs_init; - err = mlxsw_sp_ipips_init(mlxsw_sp); + err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp); if (err) goto err_ipips_init; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 25d3eae63501..1d0d28f8ff05 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -65,8 +65,6 @@ struct mlxsw_sp_router { struct notifier_block inet6addr_nb; const struct mlxsw_sp_rif_ops **rif_ops_arr; const struct mlxsw_sp_ipip_ops **ipip_ops_arr; - u32 adj_discard_index; - bool adj_discard_index_valid; struct mlxsw_sp_router_nve_decap nve_decap_config; struct mutex lock; /* Protects shared router resources */ struct work_struct fib_event_work; @@ -82,6 +80,8 @@ struct mlxsw_sp_router { struct delayed_work nh_grp_activity_dw; struct list_head nh_res_grp_list; bool inc_parsing_depth; + refcount_t num_groups; + u32 adj_trap_index; }; struct mlxsw_sp_fib_entry_priv { @@ -226,6 +226,8 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1, int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp); +struct net_device * +mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev); extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 3398cc01e5ec..f5f819aa9a65 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -1650,6 +1650,22 @@ 
void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port, return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc); } +bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger) +{ + switch (trigger) { + case MLXSW_SP_SPAN_TRIGGER_INGRESS: + case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: + case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: + return true; + case MLXSW_SP_SPAN_TRIGGER_EGRESS: + case MLXSW_SP_SPAN_TRIGGER_ECN: + return false; + } + + WARN_ON_ONCE(1); + return false; +} + static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp) { size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index efaefd1ae863..82e711afb02b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -120,6 +120,7 @@ int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_sp_span_trigger trigger, u8 tc); void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_sp_span_trigger trigger, u8 tc); +bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger); extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops; extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops; diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index b27713906d3a..0f2cdcd4a4c0 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -380,7 +380,7 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest) } } -static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) +static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, const u8 *mac) { unsigned long flags; unsigned i; @@ -1064,7 +1064,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, mac, netdev->addr_len); + eth_hw_addr_set(netdev, mac); ks8842_write_mac_addr(adapter, mac); return 0; @@ -1191,8 +1191,7 @@ static int ks8842_probe(struct platform_device *pdev) if (i < netdev->addr_len) /* an address was passed, use it */ - memcpy(netdev->dev_addr, pdata->macaddr, - netdev->addr_len); + eth_hw_addr_set(netdev, pdata->macaddr); } if (i == netdev->addr_len) { diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index a6db1a8156e1..2c4e5e602be7 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -195,7 +195,7 @@ static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np) struct net_device *dev = ks->netdev; int ret; - ret = of_get_mac_address(np, dev->dev_addr); + ret = of_get_ethdev_address(np, dev); if (!ret) { ks8851_write_mac_addr(dev); return; @@ -672,7 +672,7 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, sa->sa_data); return ks8851_write_mac_addr(dev); } diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index a0ee155f9f51..03ad8bdc1f0d 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4033,7 +4033,7 @@ static void hw_set_add_addr(struct ksz_hw *hw) } } -static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr) 
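The ks8851 hunk above shows the other recurring shape in this series: a MAC address sourced from firmware is installed through a net_device-aware helper rather than written into dev->dev_addr directly. A minimal probe-time sketch for a hypothetical foo_ driver; of_get_ethdev_address() returns 0 on success:

#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>

/* Fill dev->dev_addr from the device tree; fall back to a random,
 * locally administered address when no usable property is found.
 */
static void foo_init_mac(struct net_device *dev, struct device_node *np)
{
	if (of_get_ethdev_address(np, dev))
		eth_hw_addr_random(dev);
}
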
+static int hw_add_addr(struct ksz_hw *hw, const u8 *mac_addr) { int i; int j = ADDITIONAL_ENTRIES; @@ -4054,7 +4054,7 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr) return -1; } -static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr) +static int hw_del_addr(struct ksz_hw *hw, const u8 *mac_addr) { int i; @@ -5581,7 +5581,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr) memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); } - memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, mac->sa_data); interrupt = hw_block_intr(hw); @@ -7005,10 +7005,9 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) dev->mem_end = dev->mem_start + reg_len - 1; dev->irq = pdev->irq; if (MAIN_PORT == i) - memcpy(dev->dev_addr, hw_priv->hw.override_addr, - ETH_ALEN); + eth_hw_addr_set(dev, hw_priv->hw.override_addr); else { - memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN); + eth_hw_addr_set(dev, sw->other_addr); if (ether_addr_equal(sw->other_addr, hw->override_addr)) dev->dev_addr[5] += port->first_port; } diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index 09cdc2f2e7ff..634ac7649c43 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -517,7 +517,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; - ether_addr_copy(dev->dev_addr, address->sa_data); + eth_hw_addr_set(dev, address->sa_data); return enc28j60_set_hw_macaddr(dev); } @@ -1539,7 +1539,6 @@ static const struct net_device_ops enc28j60_netdev_ops = { static int enc28j60_probe(struct spi_device *spi) { - unsigned char macaddr[ETH_ALEN]; struct net_device *dev; struct enc28j60_net *priv; int ret = 0; @@ -1572,9 +1571,7 @@ static int enc28j60_probe(struct spi_device *spi) goto error_irq; } - if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr))) - ether_addr_copy(dev->dev_addr, macaddr); - else + if (device_get_ethdev_address(&spi->dev, dev)) eth_hw_addr_random(dev); enc28j60_set_hw_macaddr(dev); diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 0bc6b3176fbf..79167c3a3179 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -761,7 +761,7 @@ static int encx24j600_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, address->sa_data, dev->addr_len); + eth_hw_addr_set(dev, address->sa_data); return encx24j600_set_hw_macaddr(dev); } @@ -1125,4 +1125,3 @@ module_spi_driver(encx24j600_spi_net_driver); MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 9e8561cdc32a..03d02403c19e 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -816,7 +816,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter) eth_random_addr(adapter->mac_address); } lan743x_mac_set_address(adapter, adapter->mac_address); - ether_addr_copy(netdev->dev_addr, adapter->mac_address); + eth_hw_addr_set(netdev, adapter->mac_address); return 0; } @@ -2645,7 +2645,7 @@ static int lan743x_netdev_set_mac_address(struct net_device 
*netdev, ret = eth_prepare_mac_addr_change(netdev, sock_addr); if (ret) return ret; - ether_addr_copy(netdev->dev_addr, sock_addr->sa_data); + eth_hw_addr_set(netdev, sock_addr->sa_data); lan743x_mac_set_address(adapter, sock_addr->sa_data); lan743x_rfe_update_mac_address(adapter); return 0; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index cbece6e9bff2..c6eb0b70dd3f 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -234,8 +234,7 @@ static int sparx5_create_targets(struct sparx5 *sparx5) } iomem[idx] = devm_ioremap(sparx5->dev, iores[idx]->start, - iores[idx]->end - iores[idx]->start - + 1); + resource_size(iores[idx])); if (!iomem[idx]) { dev_err(sparx5->dev, "Unable to get switch registers: %s\n", iores[idx]->name); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c index cb68eaaac881..b21ebaa32d7e 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c @@ -162,7 +162,7 @@ static int sparx5_set_mac_address(struct net_device *dev, void *p) sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid); /* Record the address */ - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index d5c485a6d284..7c7a5fb91f79 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -363,7 +363,7 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth, } hwc_cq->gdma_cq = cq; - comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL); + comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL); if (!comp_buf) { err = -ENOMEM; goto out; @@ -580,7 +580,7 @@ static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth, return err; } - ctx = kzalloc(q_depth * sizeof(struct hwc_caller_ctx), GFP_KERNEL); + ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 030ae89f3a33..d65697c239c8 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1610,7 +1610,7 @@ static int mana_init_port(struct net_device *ndev) if (apc->num_queues > apc->max_queues) apc->num_queues = apc->max_queues; - ether_addr_copy(ndev->dev_addr, apc->mac_addr); + eth_hw_addr_set(ndev, apc->mac_addr); return 0; diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 49def6934cad..15179b9529e1 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -65,7 +65,7 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr) if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, address->sa_data); moxart_update_mac_address(ndev); return 0; diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index b6a73d151dec..8dd8c7f425d2 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -28,7 +28,7 @@ config MSCC_OCELOT_SWITCH depends on BRIDGE || 
BRIDGE=n depends on NET_SWITCHDEV depends on HAS_IOMEM - depends on OF_NET + depends on OF select MSCC_OCELOT_SWITCH_LIB select GENERIC_PHY help diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index a08e4f530c1c..520a75b57866 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -951,7 +951,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, ocelot_ifh_set_bypass(ifh, 1); ocelot_ifh_set_dest(ifh, BIT_ULL(port)); ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); - ocelot_ifh_set_vid(ifh, skb_vlan_tag_get(skb)); + ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb)); ocelot_ifh_set_rew_op(ifh, rew_op); for (i = 0; i < OCELOT_TAG_LEN / 4; i++) diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 8b843d3c9189..769a8159373e 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -142,17 +142,77 @@ ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain) return NULL; } +static int +ocelot_flower_parse_ingress_vlan_modify(struct ocelot *ocelot, int port, + struct ocelot_vcap_filter *filter, + const struct flow_action_entry *a, + struct netlink_ext_ack *extack) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + + if (!ocelot_port->vlan_aware) { + NL_SET_ERR_MSG_MOD(extack, + "Can only modify VLAN under VLAN aware bridge"); + return -EOPNOTSUPP; + } + + filter->action.vid_replace_ena = true; + filter->action.pcp_dei_ena = true; + filter->action.vid = a->vlan.vid; + filter->action.pcp = a->vlan.prio; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + + return 0; +} + +static int +ocelot_flower_parse_egress_vlan_modify(struct ocelot_vcap_filter *filter, + const struct flow_action_entry *a, + struct netlink_ext_ack *extack) +{ + enum ocelot_tag_tpid_sel tpid; + + switch (ntohs(a->vlan.proto)) { + case ETH_P_8021Q: + tpid = OCELOT_TAG_TPID_SEL_8021Q; + break; + case ETH_P_8021AD: + tpid = OCELOT_TAG_TPID_SEL_8021AD; + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Cannot modify custom TPID"); + return -EOPNOTSUPP; + } + + filter->action.tag_a_tpid_sel = tpid; + filter->action.push_outer_tag = OCELOT_ES0_TAG; + filter->action.tag_a_vid_sel = OCELOT_ES0_VID_PLUS_CLASSIFIED_VID; + filter->action.vid_a_val = a->vlan.vid; + filter->action.pcp_a_val = a->vlan.prio; + filter->action.tag_a_pcp_sel = OCELOT_ES0_PCP; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + + return 0; +} + static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, bool ingress, struct flow_cls_offload *f, struct ocelot_vcap_filter *filter) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; struct netlink_ext_ack *extack = f->common.extack; bool allow_missing_goto_target = false; const struct flow_action_entry *a; enum ocelot_tag_tpid_sel tpid; int i, chain, egress_port; u64 rate; + int err; if (!flow_action_basic_hw_stats_check(&f->rule->action, f->common.extack)) @@ -273,26 +333,20 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, filter->type = OCELOT_VCAP_FILTER_OFFLOAD; break; case FLOW_ACTION_VLAN_MANGLE: - if (filter->block_id != VCAP_IS1) { - NL_SET_ERR_MSG_MOD(extack, - "VLAN modify action can only be offloaded to VCAP IS1"); - return -EOPNOTSUPP; - } - if (filter->goto_target != -1) { + if (filter->block_id == VCAP_IS1) { + err = 
ocelot_flower_parse_ingress_vlan_modify(ocelot, port, + filter, a, + extack); + } else if (filter->block_id == VCAP_ES0) { + err = ocelot_flower_parse_egress_vlan_modify(filter, a, + extack); + } else { NL_SET_ERR_MSG_MOD(extack, - "Last action must be GOTO"); - return -EOPNOTSUPP; + "VLAN modify action can only be offloaded to VCAP IS1 or ES0"); + err = -EOPNOTSUPP; } - if (!ocelot_port->vlan_aware) { - NL_SET_ERR_MSG_MOD(extack, - "Can only modify VLAN under VLAN aware bridge"); - return -EOPNOTSUPP; - } - filter->action.vid_replace_ena = true; - filter->action.pcp_dei_ena = true; - filter->action.vid = a->vlan.vid; - filter->action.pcp = a->vlan.prio; - filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + if (err) + return err; break; case FLOW_ACTION_PRIORITY: if (filter->block_id != VCAP_IS1) { @@ -340,7 +394,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, } filter->action.tag_a_tpid_sel = tpid; filter->action.push_outer_tag = OCELOT_ES0_TAG; - filter->action.tag_a_vid_sel = 1; + filter->action.tag_a_vid_sel = OCELOT_ES0_VID; filter->action.vid_a_val = a->vlan.vid; filter->action.pcp_a_val = a->vlan.prio; filter->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -678,6 +732,31 @@ static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot, return 0; } +/* If we have an egress VLAN modification rule, we need to actually write the + * delta between the input VLAN (from the key) and the output VLAN (from the + * action), but the action was parsed first. So we need to patch the delta into + * the action here. + */ +static int +ocelot_flower_patch_es0_vlan_modify(struct ocelot_vcap_filter *filter, + struct netlink_ext_ack *extack) +{ + if (filter->block_id != VCAP_ES0 || + filter->action.tag_a_vid_sel != OCELOT_ES0_VID_PLUS_CLASSIFIED_VID) + return 0; + + if (filter->vlan.vid.mask != VLAN_VID_MASK) { + NL_SET_ERR_MSG_MOD(extack, + "VCAP ES0 VLAN rewriting needs a full VLAN in the key"); + return -EOPNOTSUPP; + } + + filter->action.vid_a_val -= filter->vlan.vid.value; + filter->action.vid_a_val &= VLAN_VID_MASK; + + return 0; +} + int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { @@ -701,6 +780,12 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, return ret; } + ret = ocelot_flower_patch_es0_vlan_modify(filter, extack); + if (ret) { + kfree(filter); + return ret; + } + /* The non-optional GOTOs for the TCAM skeleton don't need * to be actually offloaded. */ diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 2545727fd5b2..9992bf06311d 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -606,7 +606,7 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p) /* Then forget the previous one. 
*/ ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid_vlan.vid); - ether_addr_copy(dev->dev_addr, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); return 0; } @@ -1705,7 +1705,7 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, NETIF_F_HW_TC; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; - memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); + eth_hw_addr_set(dev, ocelot->base_mac); dev->dev_addr[ETH_ALEN - 1] += port; ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 291ae6817c26..5d01993f6be0 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -1134,10 +1134,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (err) goto out_put_ports; - err = devlink_register(devlink); - if (err) - goto out_ocelot_deinit; - err = mscc_ocelot_init_ports(pdev, ports); if (err) goto out_ocelot_devlink_unregister; @@ -1160,6 +1156,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); of_node_put(ports); + devlink_register(devlink); dev_info(&pdev->dev, "Ocelot switch probed\n"); @@ -1169,8 +1166,6 @@ out_ocelot_release_ports: mscc_ocelot_release_ports(ocelot); mscc_ocelot_teardown_devlink_ports(ocelot); out_ocelot_devlink_unregister: - devlink_unregister(devlink); -out_ocelot_deinit: ocelot_deinit(ocelot); out_put_ports: of_node_put(ports); @@ -1183,11 +1178,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev) { struct ocelot *ocelot = platform_get_drvdata(pdev); + devlink_unregister(ocelot->devlink); ocelot_deinit_timestamp(ocelot); ocelot_devlink_sb_unregister(ocelot); mscc_ocelot_release_ports(ocelot); mscc_ocelot_teardown_devlink_ports(ocelot); - devlink_unregister(ocelot->devlink); ocelot_deinit(ocelot); unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); unregister_switchdev_notifier(&ocelot_switchdev_nb); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index c1a75b08ced7..5736fcdafd7a 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -796,7 +796,8 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt) return status; } -static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr) +static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, + const u8 * addr) { struct myri10ge_cmd cmd; int status; @@ -3022,7 +3023,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr) } /* change the dev structure */ - memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, sa->sa_data); return 0; } @@ -3738,7 +3739,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *netdev; struct myri10ge_priv *mgp; struct device *dev = &pdev->dev; - int i; int status = -ENXIO; int dac_enabled; unsigned hdr_offset, ss_offset; @@ -3828,8 +3828,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (status) goto abort_with_ioremap; - for (i = 0; i < ETH_ALEN; i++) - netdev->dev_addr[i] = mgp->mac_addr[i]; + eth_hw_addr_set(netdev, mgp->mac_addr); myri10ge_select_firmware(mgp); diff --git a/drivers/net/ethernet/neterion/s2io.c 
b/drivers/net/ethernet/neterion/s2io.c index 3b6b2e61139e..d1c32c65db05 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -5202,7 +5202,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); /* store the MAC address in CAM */ return do_s2io_prog_unicast(dev, dev->dev_addr); @@ -5217,7 +5217,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p) * as defined in errno.h file on failure. */ -static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) +static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr) { struct s2io_nic *sp = netdev_priv(dev); register u64 mac_addr = 0, perm_addr = 0; @@ -7954,7 +7954,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* Set the factory defined MAC address initially */ dev->addr_len = ETH_ALEN; - memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr); /* initialize number of multicast & unicast MAC entries variables */ if (sp->device_type == XFRAME_I_DEVICE) { diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h index 5a6032212c19..a4266d1544ab 100644 --- a/drivers/net/ethernet/neterion/s2io.h +++ b/drivers/net/ethernet/neterion/s2io.h @@ -1073,7 +1073,7 @@ static void s2io_reset(struct s2io_nic * sp); static int s2io_poll_msix(struct napi_struct *napi, int budget); static int s2io_poll_inta(struct napi_struct *napi, int budget); static void s2io_init_pci(struct s2io_nic * sp); -static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); +static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr); static void s2io_alarm_handle(struct timer_list *t); static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index df4a3f3da83a..1969009a91e7 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1328,7 +1328,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) } if (unlikely(!is_vxge_card_up(vdev))) { - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return VXGE_HW_OK; } @@ -1341,7 +1341,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) return -EINVAL; } - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return status; } @@ -4663,7 +4663,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) /* Store the fw version for ethttool option */ strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version); - memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); + eth_hw_addr_set(vdev->ndev, (u8 *)vdev->vpaths[0].macaddr); /* Copy the station mac address to the list */ for (i = 0; i < vdev->no_of_vpath; i++) { diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 605a1617b195..5d3df28c648f 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -305,7 +305,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, return; } - ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr); + eth_hw_addr_set(nn->dp.netdev, mac_addr); 
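Almost every driver touched in this section gets the same mechanical conversion: dev->dev_addr is no longer written with memcpy()/ether_addr_copy() but through eth_hw_addr_set(), so all address updates funnel through one helper ahead of netdev->dev_addr becoming const. A minimal sketch of the pattern in a .ndo_set_mac_address handler; foo_hw_write_mac() is a hypothetical device-specific step:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/socket.h>

static void foo_hw_write_mac(struct net_device *dev)
{
	/* device specific: program dev->dev_addr into the MAC filters */
}

static int foo_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);	/* was: memcpy(dev->dev_addr, ...) */
	foo_hw_write_mac(dev);
	return 0;
}
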
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr); } diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c index 36491835ac65..db297ee4d7ad 100644 --- a/drivers/net/ethernet/netronome/nfp/devlink_param.c +++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c @@ -233,13 +233,8 @@ int nfp_devlink_params_register(struct nfp_pf *pf) if (err <= 0) return err; - err = devlink_params_register(devlink, nfp_devlink_params, - ARRAY_SIZE(nfp_devlink_params)); - if (err) - return err; - - devlink_params_publish(devlink); - return 0; + return devlink_params_register(devlink, nfp_devlink_params, + ARRAY_SIZE(nfp_devlink_params)); } void nfp_devlink_params_unregister(struct nfp_pf *pf) diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index ab70179728f6..dfb4468fe287 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -837,7 +837,7 @@ nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry) } static int -__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del) +__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del) { struct nfp_tun_mac_addr_offload payload; @@ -886,7 +886,7 @@ static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx) } static struct nfp_tun_offloaded_mac * -nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac) +nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac) { struct nfp_flower_priv *priv = app->priv; @@ -1005,7 +1005,7 @@ err_free_ida: static int nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, - u8 *mac, bool mod) + const u8 *mac, bool mod) { struct nfp_flower_priv *priv = app->priv; struct nfp_flower_repr_priv *repr_priv; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index d10a93801344..751f76cd4f79 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -55,7 +55,7 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, return; } - ether_addr_copy(netdev->dev_addr, eth_port->mac_addr); + eth_hw_addr_set(netdev, eth_port->mac_addr); ether_addr_copy(netdev->perm_addr, eth_port->mac_addr); } @@ -701,10 +701,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_unmap; - err = devlink_register(devlink); - if (err) - goto err_app_clean; - err = nfp_shared_buf_register(pf); if (err) goto err_devlink_unreg; @@ -734,6 +730,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) goto err_stop_app; mutex_unlock(&pf->lock); + devlink_register(devlink); return 0; @@ -751,8 +748,6 @@ err_shared_buf_unreg: nfp_shared_buf_unregister(pf); err_devlink_unreg: cancel_work_sync(&pf->port_refresh_work); - devlink_unregister(devlink); -err_app_clean: nfp_net_pf_app_clean(pf); err_unmap: nfp_net_pci_unmap_mem(pf); @@ -763,6 +758,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf) { struct nfp_net *nn, *next; + devlink_unregister(priv_to_devlink(pf)); mutex_lock(&pf->lock); list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) { if (!nfp_net_is_data_vnic(nn)) @@ -779,7 +775,6 @@ void nfp_net_pci_remove(struct nfp_pf *pf) nfp_devlink_params_unregister(pf); nfp_shared_buf_unregister(pf); - devlink_unregister(priv_to_devlink(pf)); nfp_net_pf_free_irqs(pf); nfp_net_pf_app_clean(pf); diff --git 
a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 3b8e675087de..369f6ae700c7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -499,8 +499,7 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs) { struct nfp_reprs *reprs; - reprs = kzalloc(sizeof(*reprs) + - num_reprs * sizeof(struct net_device *), GFP_KERNEL); + reprs = kzalloc(struct_size(reprs, reprs, num_reprs), GFP_KERNEL); if (!reprs) return NULL; reprs->num_reprs = num_reprs; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index c0e2f4394aef..87f2268b16d6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -58,7 +58,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn) return; } - ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr); + eth_hw_addr_set(nn->dp.netdev, mac_addr); ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr); } diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 346145d3180e..cfeb7620ae20 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1283,7 +1283,7 @@ static int nixge_probe(struct platform_device *pdev) mac_addr = nixge_get_nvmem_address(&pdev->dev); if (mac_addr && is_valid_ether_addr(mac_addr)) { - ether_addr_copy(ndev->dev_addr, mac_addr); + eth_hw_addr_set(ndev, mac_addr); kfree(mac_addr); } else { eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index ef3fb4cc90af..9b530d7509a4 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -3175,7 +3175,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr) return -EADDRNOTAVAIL; /* synchronized against open : rtnl_lock() held by caller */ - memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, macaddr->sa_data); if (netif_running(dev)) { netif_tx_lock_bh(dev); @@ -5711,6 +5711,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) u32 phystate_orig = 0, phystate; int phyinitialized = 0; static int printed_version; + u8 mac[ETH_ALEN]; if (!printed_version++) pr_info("Reverse Engineered nForce ethernet driver. 
Version %s.\n", @@ -5884,50 +5885,52 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) txreg = readl(base + NvRegTransmitPoll); if (id->driver_data & DEV_HAS_CORRECT_MACADDR) { /* mac address is already in correct order */ - dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; - dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; - dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; - dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; - dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; - dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; + mac[0] = (np->orig_mac[0] >> 0) & 0xff; + mac[1] = (np->orig_mac[0] >> 8) & 0xff; + mac[2] = (np->orig_mac[0] >> 16) & 0xff; + mac[3] = (np->orig_mac[0] >> 24) & 0xff; + mac[4] = (np->orig_mac[1] >> 0) & 0xff; + mac[5] = (np->orig_mac[1] >> 8) & 0xff; } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { /* mac address is already in correct order */ - dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; - dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; - dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; - dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; - dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; - dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; + mac[0] = (np->orig_mac[0] >> 0) & 0xff; + mac[1] = (np->orig_mac[0] >> 8) & 0xff; + mac[2] = (np->orig_mac[0] >> 16) & 0xff; + mac[3] = (np->orig_mac[0] >> 24) & 0xff; + mac[4] = (np->orig_mac[1] >> 0) & 0xff; + mac[5] = (np->orig_mac[1] >> 8) & 0xff; /* * Set orig mac address back to the reversed version. * This flag will be cleared during low power transition. * Therefore, we should always put back the reversed address. */ - np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) + - (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24); - np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8); + np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) + + (mac[3] << 16) + (mac[2] << 24); + np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8); } else { /* need to reverse mac address to correct order */ - dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; - dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; - dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; - dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; - dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; - dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; + mac[0] = (np->orig_mac[1] >> 8) & 0xff; + mac[1] = (np->orig_mac[1] >> 0) & 0xff; + mac[2] = (np->orig_mac[0] >> 24) & 0xff; + mac[3] = (np->orig_mac[0] >> 16) & 0xff; + mac[4] = (np->orig_mac[0] >> 8) & 0xff; + mac[5] = (np->orig_mac[0] >> 0) & 0xff; writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); dev_dbg(&pci_dev->dev, "%s: set workaround bit for reversed mac addr\n", __func__); } - if (!is_valid_ether_addr(dev->dev_addr)) { + if (is_valid_ether_addr(mac)) { + eth_hw_addr_set(dev, mac); + } else { /* * Bad mac address. 
At least one bios sets the mac address * to 01:23:45:67:89:ab */ dev_err(&pci_dev->dev, "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n", - dev->dev_addr); + mac); eth_hw_addr_random(dev); dev_err(&pci_dev->dev, "Using random MAC address: %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index d29fe562b3de..fbfbf94e0377 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -419,7 +419,7 @@ struct netdata_local { /* * MAC support functions */ -static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac) +static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac) { u32 tmp; @@ -1093,7 +1093,7 @@ static int lpc_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, addr->sa_data); spin_lock_irqsave(&pldat->lock, flags); @@ -1350,7 +1350,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) __lpc_get_mac(pldat, ndev->dev_addr); if (!is_valid_ether_addr(ndev->dev_addr)) { - of_get_mac_address(np, ndev->dev_addr); + of_get_ethdev_address(np, ndev); } if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index ec3e558f890e..71d234291fc5 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2137,7 +2137,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr) if (!is_valid_ether_addr(skaddr->sa_data)) { ret_val = -EADDRNOTAVAIL; } else { - memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, skaddr->sa_data); memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len); pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0); ret_val = 0; @@ -2555,7 +2555,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, goto err_free_adapter; } - memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + eth_hw_addr_set(netdev, adapter->hw.mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { /* * If the MAC is invalid (or just missing), display a warning diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 7e096b2888b9..f0ace3a0e85c 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -221,7 +221,7 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); adr0 = dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 | @@ -1722,7 +1722,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENODEV; goto out; } - memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); + eth_hw_addr_set(dev, mac->mac_addr); ret = mac_to_intf(mac); if (ret < 0) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 66204106f83e..5e25411ff02f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -19,6 +19,7 @@ struct ionic_lif; #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 #define DEVCMD_TIMEOUT 10 +#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100) #define 
IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */ #define NORMAL_PPB 1000000000 /* one billion parts per billion */ @@ -69,8 +70,13 @@ struct ionic_admin_ctx { }; int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); -int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err); +int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, + const int err, const bool do_msg); int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode, + u8 status, int err); + int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait); int ionic_set_dma_mask(struct ionic *ionic); int ionic_setup(struct ionic *ionic); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c index 39f59849720d..c58217027564 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c @@ -143,8 +143,6 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index); debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type); debugfs_create_u64("drop", 0400, q_dentry, &q->drop); - debugfs_create_u64("stop", 0400, q_dentry, &q->stop); - debugfs_create_u64("wake", 0400, q_dentry, &q->wake); debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops); debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops); @@ -228,6 +226,50 @@ static int netdev_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(netdev); +static int lif_filters_show(struct seq_file *seq, void *v) +{ + struct ionic_lif *lif = seq->private; + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + unsigned int i; + + seq_puts(seq, "id flow state type filter\n"); + spin_lock_bh(&lif->rx_filters.lock); + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) { + switch (le16_to_cpu(f->cmd.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + seq_printf(seq, "0x%04x 0x%08x 0x%02x vlan 0x%04x\n", + f->filter_id, f->flow_id, f->state, + le16_to_cpu(f->cmd.vlan.vlan)); + break; + case IONIC_RX_FILTER_MATCH_MAC: + seq_printf(seq, "0x%04x 0x%08x 0x%02x mac %pM\n", + f->filter_id, f->flow_id, f->state, + f->cmd.mac.addr); + break; + case IONIC_RX_FILTER_MATCH_MAC_VLAN: + seq_printf(seq, "0x%04x 0x%08x 0x%02x macvl 0x%04x %pM\n", + f->filter_id, f->flow_id, f->state, + le16_to_cpu(f->cmd.vlan.vlan), + f->cmd.mac.addr); + break; + case IONIC_RX_FILTER_STEER_PKTCLASS: + seq_printf(seq, "0x%04x 0x%08x 0x%02x rxstr 0x%llx\n", + f->filter_id, f->flow_id, f->state, + le64_to_cpu(f->cmd.pkt_class)); + break; + } + } + } + spin_unlock_bh(&lif->rx_filters.lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(lif_filters); + void ionic_debugfs_add_lif(struct ionic_lif *lif) { struct dentry *lif_dentry; @@ -239,6 +281,8 @@ void ionic_debugfs_add_lif(struct ionic_lif *lif) debugfs_create_file("netdev", 0400, lif->dentry, lif->netdev, &netdev_fops); + debugfs_create_file("filters", 0400, lif->dentry, + lif, &lif_filters_fops); } void ionic_debugfs_del_lif(struct ionic_lif *lif) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 0d6858ab511c..d57e80d44c9d 100644 --- 
a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -581,7 +581,6 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, cq->done_color = !cq->done_color; cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); cq_info = &cq->info[cq->tail_idx]; - DEBUG_STATS_CQE_CNT(cq); if (++work_done >= work_to_do) break; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 8311086fb1f4..e5acf3bd62b2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -220,9 +220,6 @@ struct ionic_queue { unsigned int num_descs; unsigned int max_sg_elems; u64 features; - u64 dbell_count; - u64 stop; - u64 wake; u64 drop; struct ionic_dev *idev; unsigned int type; @@ -269,7 +266,6 @@ struct ionic_cq { bool done_color; unsigned int num_descs; unsigned int desc_size; - u64 compl_count; void *base; dma_addr_t base_pa; } ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c index c7d0e195d176..4297ed9024c0 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -82,22 +82,16 @@ int ionic_devlink_register(struct ionic *ionic) struct devlink_port_attrs attrs = {}; int err; - err = devlink_register(dl); - if (err) { - dev_warn(ionic->dev, "devlink_register failed: %d\n", err); - return err; - } - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; devlink_port_attrs_set(&ionic->dl_port, &attrs); err = devlink_port_register(dl, &ionic->dl_port, 0); if (err) { dev_err(ionic->dev, "devlink_port_register failed: %d\n", err); - devlink_unregister(dl); return err; } devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev); + devlink_register(dl); return 0; } @@ -105,6 +99,6 @@ void ionic_devlink_unregister(struct ionic *ionic) { struct devlink *dl = priv_to_devlink(ionic); - devlink_port_unregister(&ionic->dl_port); devlink_unregister(dl); + devlink_port_unregister(&ionic->dl_port); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 3de1a03839e2..6b45cae39a20 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -11,13 +11,6 @@ #include "ionic_ethtool.h" #include "ionic_stats.h" -static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = { -#define IONIC_PRIV_F_SW_DBG_STATS BIT(0) - "sw-dbg-stats", -}; - -#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings) - static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf) { u32 i; @@ -59,9 +52,6 @@ static int ionic_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_STATS: count = ionic_get_stats_count(lif); break; - case ETH_SS_PRIV_FLAGS: - count = IONIC_PRIV_FLAGS_COUNT; - break; } return count; } @@ -75,10 +65,6 @@ static void ionic_get_strings(struct net_device *netdev, case ETH_SS_STATS: ionic_get_stats_strings(lif, buf); break; - case ETH_SS_PRIV_FLAGS: - memcpy(buf, ionic_priv_flags_strings, - IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN); - break; } } @@ -691,28 +677,6 @@ static int ionic_set_channels(struct net_device *netdev, return err; } -static u32 ionic_get_priv_flags(struct net_device *netdev) -{ - struct ionic_lif *lif = netdev_priv(netdev); - u32 priv_flags = 0; - - if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) 
- priv_flags |= IONIC_PRIV_F_SW_DBG_STATS; - - return priv_flags; -} - -static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags) -{ - struct ionic_lif *lif = netdev_priv(netdev); - - clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state); - if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS) - set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state); - - return 0; -} - static int ionic_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules) { @@ -1013,8 +977,6 @@ static const struct ethtool_ops ionic_ethtool_ops = { .get_strings = ionic_get_strings, .get_ethtool_stats = ionic_get_stats, .get_sset_count = ionic_get_sset_count, - .get_priv_flags = ionic_get_priv_flags, - .set_priv_flags = ionic_set_priv_flags, .get_rxnfc = ionic_get_rxnfc, .get_rxfh_indir_size = ionic_get_rxfh_indir_size, .get_rxfh_key_size = ionic_get_rxfh_key_size, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 7f3322ce044c..63f8a8163b5f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -287,11 +287,9 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) return ionic_adminq_post_wait(lif, &ctx); } -static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) +static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err) { struct ionic_queue *q; - struct ionic_lif *lif; - int err = 0; struct ionic_admin_ctx ctx = { .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), @@ -301,11 +299,12 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) }, }; - if (!qcq) + if (!qcq) { + netdev_err(lif->netdev, "%s: bad qcq\n", __func__); return -ENXIO; + } q = &qcq->q; - lif = q->lif; if (qcq->flags & IONIC_QCQ_F_INTR) { struct ionic_dev *idev = &lif->ionic->idev; @@ -318,17 +317,19 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) napi_disable(&qcq->napi); } - if (send_to_hw) { - ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index); - ctx.cmd.q_control.type = q->type; - ctx.cmd.q_control.index = cpu_to_le32(q->index); - dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n", - ctx.cmd.q_control.index, ctx.cmd.q_control.type); + /* If there was a previous fw communcation error, don't bother with + * sending the adminq command and just return the same error value. 
+ */ + if (fw_err == -ETIMEDOUT || fw_err == -ENXIO) + return fw_err; - err = ionic_adminq_post_wait(lif, &ctx); - } + ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index); + ctx.cmd.q_control.type = q->type; + ctx.cmd.q_control.index = cpu_to_le32(q->index); + dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n", + ctx.cmd.q_control.index, ctx.cmd.q_control.type); - return err; + return ionic_adminq_post_wait(lif, &ctx); } static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) @@ -1241,137 +1242,6 @@ void ionic_get_stats64(struct net_device *netdev, ns->tx_errors = ns->tx_aborted_errors; } -int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) -{ - struct ionic_admin_ctx ctx = { - .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), - .cmd.rx_filter_add = { - .opcode = IONIC_CMD_RX_FILTER_ADD, - .lif_index = cpu_to_le16(lif->index), - .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), - }, - }; - int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); - bool mc = is_multicast_ether_addr(addr); - struct ionic_rx_filter *f; - int err = 0; - - memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); - - spin_lock_bh(&lif->rx_filters.lock); - f = ionic_rx_filter_by_addr(lif, addr); - if (f) { - /* don't bother if we already have it and it is sync'd */ - if (f->state == IONIC_FILTER_STATE_SYNCED) { - spin_unlock_bh(&lif->rx_filters.lock); - return 0; - } - - /* mark preemptively as sync'd to block any parallel attempts */ - f->state = IONIC_FILTER_STATE_SYNCED; - } else { - /* save as SYNCED to catch any DEL requests while processing */ - err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, - IONIC_FILTER_STATE_SYNCED); - } - spin_unlock_bh(&lif->rx_filters.lock); - if (err) - return err; - - netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); - - /* Don't bother with the write to FW if we know there's no room, - * we can try again on the next sync attempt. - */ - if ((lif->nucast + lif->nmcast) >= nfilters) - err = -ENOSPC; - else - err = ionic_adminq_post_wait(lif, &ctx); - - spin_lock_bh(&lif->rx_filters.lock); - if (err && err != -EEXIST) { - /* set the state back to NEW so we can try again later */ - f = ionic_rx_filter_by_addr(lif, addr); - if (f && f->state == IONIC_FILTER_STATE_SYNCED) { - f->state = IONIC_FILTER_STATE_NEW; - set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); - } - - spin_unlock_bh(&lif->rx_filters.lock); - - if (err == -ENOSPC) - return 0; - else - return err; - } - - if (mc) - lif->nmcast++; - else - lif->nucast++; - - f = ionic_rx_filter_by_addr(lif, addr); - if (f && f->state == IONIC_FILTER_STATE_OLD) { - /* Someone requested a delete while we were adding - * so update the filter info with the results from the add - * and the data will be there for the delete on the next - * sync cycle. 
- */ - err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, - IONIC_FILTER_STATE_OLD); - } else { - err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, - IONIC_FILTER_STATE_SYNCED); - } - - spin_unlock_bh(&lif->rx_filters.lock); - - return err; -} - -int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) -{ - struct ionic_admin_ctx ctx = { - .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), - .cmd.rx_filter_del = { - .opcode = IONIC_CMD_RX_FILTER_DEL, - .lif_index = cpu_to_le16(lif->index), - }, - }; - struct ionic_rx_filter *f; - int state; - int err; - - spin_lock_bh(&lif->rx_filters.lock); - f = ionic_rx_filter_by_addr(lif, addr); - if (!f) { - spin_unlock_bh(&lif->rx_filters.lock); - return -ENOENT; - } - - netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", - addr, f->filter_id); - - state = f->state; - ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); - ionic_rx_filter_free(lif, f); - - if (is_multicast_ether_addr(addr) && lif->nmcast) - lif->nmcast--; - else if (!is_multicast_ether_addr(addr) && lif->nucast) - lif->nucast--; - - spin_unlock_bh(&lif->rx_filters.lock); - - if (state != IONIC_FILTER_STATE_NEW) { - err = ionic_adminq_post_wait(lif, &ctx); - if (err && err != -EEXIST) - return err; - } - - return 0; -} - static int ionic_addr_add(struct net_device *netdev, const u8 *addr) { return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR); @@ -1407,7 +1277,7 @@ void ionic_lif_rx_mode(struct ionic_lif *lif) rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; - /* sync the mac filters */ + /* sync the filters */ ionic_rx_filter_sync(lif); /* check for overflow state @@ -1417,14 +1287,12 @@ void ionic_lif_rx_mode(struct ionic_lif *lif) * to see if we can disable NIC PROMISC */ nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); - if ((lif->nucast + lif->nmcast) >= nfilters) { + + if (((lif->nucast + lif->nmcast) >= nfilters) || + (lif->max_vlans && lif->nvlans >= lif->max_vlans)) { rx_mode |= IONIC_RX_MODE_F_PROMISC; rx_mode |= IONIC_RX_MODE_F_ALLMULTI; - lif->uc_overflow = true; - lif->mc_overflow = true; - } else if (lif->uc_overflow) { - lif->uc_overflow = false; - lif->mc_overflow = false; + } else { if (!(nd_flags & IFF_PROMISC)) rx_mode &= ~IONIC_RX_MODE_F_PROMISC; if (!(nd_flags & IFF_ALLMULTI)) @@ -1809,59 +1677,30 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ionic_lif *lif = netdev_priv(netdev); - struct ionic_admin_ctx ctx = { - .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), - .cmd.rx_filter_add = { - .opcode = IONIC_CMD_RX_FILTER_ADD, - .lif_index = cpu_to_le16(lif->index), - .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), - .vlan.vlan = cpu_to_le16(vid), - }, - }; int err; - netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid); - err = ionic_adminq_post_wait(lif, &ctx); + err = ionic_lif_vlan_add(lif, vid); if (err) return err; - spin_lock_bh(&lif->rx_filters.lock); - err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, - IONIC_FILTER_STATE_SYNCED); - spin_unlock_bh(&lif->rx_filters.lock); + ionic_lif_rx_mode(lif); - return err; + return 0; } static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ionic_lif *lif = netdev_priv(netdev); - struct ionic_admin_ctx ctx = { - .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), - .cmd.rx_filter_del = { - .opcode = IONIC_CMD_RX_FILTER_DEL, - .lif_index = cpu_to_le16(lif->index), - 
}, - }; - struct ionic_rx_filter *f; - - spin_lock_bh(&lif->rx_filters.lock); - - f = ionic_rx_filter_by_vlan(lif, vid); - if (!f) { - spin_unlock_bh(&lif->rx_filters.lock); - return -ENOENT; - } + int err; - netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", - vid, f->filter_id); + err = ionic_lif_vlan_del(lif, vid); + if (err) + return err; - ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); - ionic_rx_filter_free(lif, f); - spin_unlock_bh(&lif->rx_filters.lock); + ionic_lif_rx_mode(lif); - return ionic_adminq_post_wait(lif, &ctx); + return 0; } int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, @@ -1953,19 +1792,19 @@ static void ionic_txrx_disable(struct ionic_lif *lif) if (lif->txqcqs) { for (i = 0; i < lif->nxqs; i++) - err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT)); + err = ionic_qcq_disable(lif, lif->txqcqs[i], err); } if (lif->hwstamp_txq) - err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT)); + err = ionic_qcq_disable(lif, lif->hwstamp_txq, err); if (lif->rxqcqs) { for (i = 0; i < lif->nxqs; i++) - err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); + err = ionic_qcq_disable(lif, lif->rxqcqs[i], err); } if (lif->hwstamp_rxq) - err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT)); + err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err); ionic_lif_quiesce(lif); } @@ -2165,7 +2004,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif) err = ionic_qcq_enable(lif->txqcqs[i]); if (err) { - derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); + derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err); goto err_out; } } @@ -2187,13 +2026,13 @@ static int ionic_txrx_enable(struct ionic_lif *lif) err_out_hwstamp_tx: if (lif->hwstamp_rxq) - derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT)); + derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr); err_out_hwstamp_rx: i = lif->nxqs; err_out: while (i--) { - derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT)); - derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT)); + derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr); + derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr); } return err; @@ -2896,6 +2735,9 @@ int ionic_lif_alloc(struct ionic *ionic) snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); + mutex_init(&lif->queue_lock); + mutex_init(&lif->config_lock); + spin_lock_init(&lif->adminq_lock); spin_lock_init(&lif->deferred.lock); @@ -2909,7 +2751,7 @@ int ionic_lif_alloc(struct ionic *ionic) if (!lif->info) { dev_err(dev, "Failed to allocate lif info, aborting\n"); err = -ENOMEM; - goto err_out_free_netdev; + goto err_out_free_mutex; } ionic_debugfs_add_lif(lif); @@ -2944,6 +2786,9 @@ err_out_free_lif_info: dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); lif->info = NULL; lif->info_pa = 0; +err_out_free_mutex: + mutex_destroy(&lif->config_lock); + mutex_destroy(&lif->queue_lock); err_out_free_netdev: free_netdev(lif->netdev); lif = NULL; @@ -2974,11 +2819,10 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif) netif_device_detach(lif->netdev); + mutex_lock(&lif->queue_lock); if (test_bit(IONIC_LIF_F_UP, lif->state)) { dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); - mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); - mutex_unlock(&lif->queue_lock); } if (netif_running(lif->netdev)) { @@ -2989,6 +2833,8 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif) ionic_reset(ionic); ionic_qcqs_free(lif); + mutex_unlock(&lif->queue_lock); + dev_info(ionic->dev, "FW Down: LIFs 
stopped\n"); } @@ -3012,9 +2858,12 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) err = ionic_port_init(ionic); if (err) goto err_out; + + mutex_lock(&lif->queue_lock); + err = ionic_qcqs_alloc(lif); if (err) - goto err_out; + goto err_unlock; err = ionic_lif_init(lif); if (err) @@ -3035,6 +2884,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) goto err_txrx_free; } + mutex_unlock(&lif->queue_lock); + clear_bit(IONIC_LIF_F_FW_RESET, lif->state); ionic_link_status_check_request(lif, CAN_SLEEP); netif_device_attach(lif->netdev); @@ -3051,6 +2902,8 @@ err_lifs_deinit: ionic_lif_deinit(lif); err_qcqs_free: ionic_qcqs_free(lif); +err_unlock: + mutex_unlock(&lif->queue_lock); err_out: dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); } @@ -3084,6 +2937,9 @@ void ionic_lif_free(struct ionic_lif *lif) kfree(lif->dbid_inuse); lif->dbid_inuse = NULL; + mutex_destroy(&lif->config_lock); + mutex_destroy(&lif->queue_lock); + /* free netdev & lif */ ionic_debugfs_del_lif(lif); free_netdev(lif->netdev); @@ -3106,8 +2962,6 @@ void ionic_lif_deinit(struct ionic_lif *lif) ionic_lif_qcq_deinit(lif, lif->notifyqcq); ionic_lif_qcq_deinit(lif, lif->adminqcq); - mutex_destroy(&lif->config_lock); - mutex_destroy(&lif->queue_lock); ionic_lif_reset(lif); } @@ -3273,8 +3127,6 @@ int ionic_lif_init(struct ionic_lif *lif) return err; lif->hw_index = le16_to_cpu(comp.hw_index); - mutex_init(&lif->queue_lock); - mutex_init(&lif->config_lock); /* now that we have the hw_index we can figure out our doorbell page */ lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 4915184f3efb..9f7ab2f17f93 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -14,9 +14,6 @@ #define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */ #define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */ -#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1) -#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1) - #define ADD_ADDR true #define DEL_ADDR false #define CAN_SLEEP true @@ -37,7 +34,6 @@ struct ionic_tx_stats { u64 clean; u64 linearize; u64 crc32_csum; - u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR]; u64 dma_map_err; u64 hwstamp_valid; u64 hwstamp_invalid; @@ -48,7 +44,6 @@ struct ionic_rx_stats { u64 bytes; u64 csum_none; u64 csum_complete; - u64 buffers_posted; u64 dropped; u64 vlan_stripped; u64 csum_error; @@ -65,11 +60,6 @@ struct ionic_rx_stats { #define IONIC_QCQ_F_RX_STATS BIT(4) #define IONIC_QCQ_F_NOTIFYQ BIT(5) -struct ionic_napi_stats { - u64 poll_count; - u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR]; -}; - struct ionic_qcq { void *q_base; dma_addr_t q_base_pa; @@ -85,7 +75,6 @@ struct ionic_qcq { struct ionic_cq cq; struct ionic_intr_info intr; struct napi_struct napi; - struct ionic_napi_stats napi_stats; unsigned int flags; struct dentry *dentry; }; @@ -142,7 +131,6 @@ struct ionic_lif_sw_stats { enum ionic_lif_state_flags { IONIC_LIF_F_INITED, - IONIC_LIF_F_SW_DEBUG_STATS, IONIC_LIF_F_UP, IONIC_LIF_F_LINK_CHECK_REQUESTED, IONIC_LIF_F_FILTER_SYNC_NEEDED, @@ -201,11 +189,11 @@ struct ionic_lif { u16 rx_mode; u64 hw_features; bool registered; - bool mc_overflow; - bool uc_overflow; u16 lif_type; unsigned int nmcast; unsigned int nucast; + unsigned int nvlans; + unsigned int max_vlans; char name[IONIC_LIF_NAME_MAX_SZ]; union ionic_lif_identity *identity; @@ -350,37 +338,4 @@ int 
ionic_lif_rss_config(struct ionic_lif *lif, u16 types, void ionic_lif_rx_mode(struct ionic_lif *lif); int ionic_reconfigure_queues(struct ionic_lif *lif, struct ionic_queue_params *qparam); - -static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell) -{ - struct ionic_txq_desc *desc = &q->txq[q->head_idx]; - u8 num_sg_elems; - - q->dbell_count += dbell; - - num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT) - & IONIC_TXQ_DESC_NSGE_MASK); - if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1)) - num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1; - - q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++; -} - -static inline void debug_stats_napi_poll(struct ionic_qcq *qcq, - unsigned int work_done) -{ - qcq->napi_stats.poll_count++; - - if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1)) - work_done = IONIC_MAX_NUM_NAPI_CNTR - 1; - - qcq->napi_stats.work_done_cntr[work_done]++; -} - -#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++) -#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++) -#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell) -#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \ - debug_stats_napi_poll(qcq, work_done) - #endif /* _IONIC_LIF_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 6f07bf509efe..875f4ec42efe 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -7,6 +7,7 @@ #include <linux/netdevice.h> #include <linux/utsname.h> #include <generated/utsrelease.h> +#include <linux/ctype.h> #include "ionic.h" #include "ionic_bus.h" @@ -211,24 +212,28 @@ static void ionic_adminq_flush(struct ionic_lif *lif) spin_unlock_irqrestore(&lif->adminq_lock, irqflags); } +void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode, + u8 status, int err) +{ + netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n", + ionic_opcode_to_str(opcode), opcode, + ionic_error_to_str(status), err); +} + static int ionic_adminq_check_err(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, - bool timeout) + const bool timeout, + const bool do_msg) { - struct net_device *netdev = lif->netdev; - const char *opcode_str; - const char *status_str; int err = 0; if (ctx->comp.comp.status || timeout) { - opcode_str = ionic_opcode_to_str(ctx->cmd.cmd.opcode); - status_str = ionic_error_to_str(ctx->comp.comp.status); err = timeout ? -ETIMEDOUT : ionic_error_to_errno(ctx->comp.comp.status); - netdev_err(netdev, "%s (%d) failed: %s (%d)\n", - opcode_str, ctx->cmd.cmd.opcode, - timeout ? 
"TIMEOUT" : status_str, err); + if (do_msg) + ionic_adminq_netdev_err_print(lif, ctx->cmd.cmd.opcode, + ctx->comp.comp.status, err); if (timeout) ionic_adminq_flush(lif); @@ -297,24 +302,52 @@ err_out: return err; } -int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err) +int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, + const int err, const bool do_msg) { struct net_device *netdev = lif->netdev; + unsigned long time_limit; + unsigned long time_start; + unsigned long time_done; unsigned long remaining; const char *name; + name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + if (err) { - if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { - name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) netdev_err(netdev, "Posting of %s (%d) failed: %d\n", name, ctx->cmd.cmd.opcode, err); - } return err; } - remaining = wait_for_completion_timeout(&ctx->work, - HZ * (ulong)DEVCMD_TIMEOUT); - return ionic_adminq_check_err(lif, ctx, (remaining == 0)); + time_start = jiffies; + time_limit = time_start + HZ * (ulong)DEVCMD_TIMEOUT; + do { + remaining = wait_for_completion_timeout(&ctx->work, + IONIC_ADMINQ_TIME_SLICE); + + /* check for done */ + if (remaining) + break; + + /* interrupt the wait if FW stopped */ + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { + if (do_msg) + netdev_err(netdev, "%s (%d) interrupted, FW in reset\n", + name, ctx->cmd.cmd.opcode); + return -ENXIO; + } + + } while (time_before(jiffies, time_limit)); + time_done = jiffies; + + dev_dbg(lif->ionic->dev, "%s: elapsed %d msecs\n", + __func__, jiffies_to_msecs(time_done - time_start)); + + return ionic_adminq_check_err(lif, ctx, + time_after_eq(time_done, time_limit), + do_msg); } int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) @@ -323,7 +356,16 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) err = ionic_adminq_post(lif, ctx); - return ionic_adminq_wait(lif, ctx, err); + return ionic_adminq_wait(lif, ctx, err, true); +} + +int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + int err; + + err = ionic_adminq_post(lif, ctx); + + return ionic_adminq_wait(lif, ctx, err, false); } static void ionic_dev_cmd_clean(struct ionic *ionic) @@ -450,13 +492,23 @@ int ionic_identify(struct ionic *ionic) } mutex_unlock(&ionic->dev_cmd_lock); - dev_info(ionic->dev, "FW: %s\n", idev->dev_info.fw_version); - if (err) { - dev_err(ionic->dev, "Cannot identify ionic: %dn", err); + dev_err(ionic->dev, "Cannot identify ionic: %d\n", err); goto err_out; } + if (isprint(idev->dev_info.fw_version[0]) && + isascii(idev->dev_info.fw_version[0])) + dev_info(ionic->dev, "FW: %.*s\n", + (int)(sizeof(idev->dev_info.fw_version) - 1), + idev->dev_info.fw_version); + else + dev_info(ionic->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n", + (u8)idev->dev_info.fw_version[0], + (u8)idev->dev_info.fw_version[1], + (u8)idev->dev_info.fw_version[2], + (u8)idev->dev_info.fw_version[3]); + err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC, &ionic->ident.lif); if (err) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c index eed2db69d708..887046838b3b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c @@ -348,7 +348,7 @@ static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm) 
spin_unlock_irqrestore(&phc->lock, irqflags); - return ionic_adminq_wait(phc->lif, &ctx, err); + return ionic_adminq_wait(phc->lif, &ctx, err, true); } static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta) @@ -373,7 +373,7 @@ static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta) spin_unlock_irqrestore(&phc->lock, irqflags); - return ionic_adminq_wait(phc->lif, &ctx, err); + return ionic_adminq_wait(phc->lif, &ctx, err, true); } static int ionic_phc_settime64(struct ptp_clock_info *info, @@ -402,7 +402,7 @@ static int ionic_phc_settime64(struct ptp_clock_info *info, spin_unlock_irqrestore(&phc->lock, irqflags); - return ionic_adminq_wait(phc->lif, &ctx, err); + return ionic_adminq_wait(phc->lif, &ctx, err, true); } static int ionic_phc_gettimex64(struct ptp_clock_info *info, @@ -459,7 +459,7 @@ static long ionic_phc_aux_work(struct ptp_clock_info *info) spin_unlock_irqrestore(&phc->lock, irqflags); - ionic_adminq_wait(phc->lif, &ctx, err); + ionic_adminq_wait(phc->lif, &ctx, err, true); return phc->aux_work_delay; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index 69728f9013cb..f6e785f949f9 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -239,6 +239,21 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif) return NULL; } +static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif, + struct ionic_rx_filter_add_cmd *ac) +{ + switch (le16_to_cpu(ac->match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan)); + case IONIC_RX_FILTER_MATCH_MAC: + return ionic_rx_filter_by_addr(lif, ac->mac.addr); + default: + netdev_err(lif->netdev, "unsupported filter match %d", + le16_to_cpu(ac->match)); + return NULL; + } +} + int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode) { struct ionic_rx_filter *f; @@ -286,6 +301,228 @@ int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode) return 0; } +static int ionic_lif_filter_add(struct ionic_lif *lif, + struct ionic_rx_filter_add_cmd *ac) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + }; + struct ionic_rx_filter *f; + int nfilters; + int err = 0; + + ctx.cmd.rx_filter_add = *ac; + ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD, + ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index), + + spin_lock_bh(&lif->rx_filters.lock); + f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add); + if (f) { + /* don't bother if we already have it and it is sync'd */ + if (f->state == IONIC_FILTER_STATE_SYNCED) { + spin_unlock_bh(&lif->rx_filters.lock); + return 0; + } + + /* mark preemptively as sync'd to block any parallel attempts */ + f->state = IONIC_FILTER_STATE_SYNCED; + } else { + /* save as SYNCED to catch any DEL requests while processing */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } + spin_unlock_bh(&lif->rx_filters.lock); + if (err) + return err; + + /* Don't bother with the write to FW if we know there's no room, + * we can try again on the next sync attempt. + * Since the FW doesn't have a way to tell us the vlan limit, + * we start max_vlans at 0 until we hit the ENOSPC error. 
+ */ + switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + netdev_dbg(lif->netdev, "%s: rx_filter add VLAN %d\n", + __func__, ctx.cmd.rx_filter_add.vlan.vlan); + if (lif->max_vlans && lif->nvlans >= lif->max_vlans) + err = -ENOSPC; + break; + case IONIC_RX_FILTER_MATCH_MAC: + netdev_dbg(lif->netdev, "%s: rx_filter add ADDR %pM\n", + __func__, ctx.cmd.rx_filter_add.mac.addr); + nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); + if ((lif->nucast + lif->nmcast) >= nfilters) + err = -ENOSPC; + break; + } + + if (err != -ENOSPC) + err = ionic_adminq_post_wait_nomsg(lif, &ctx); + + spin_lock_bh(&lif->rx_filters.lock); + + if (err && err != -EEXIST) { + /* set the state back to NEW so we can try again later */ + f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add); + if (f && f->state == IONIC_FILTER_STATE_SYNCED) { + f->state = IONIC_FILTER_STATE_NEW; + + /* If -ENOSPC we won't waste time trying to sync again + * until there is a delete that might make room + */ + if (err != -ENOSPC) + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + } + + spin_unlock_bh(&lif->rx_filters.lock); + + if (err == -ENOSPC) { + if (le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN) + lif->max_vlans = lif->nvlans; + return 0; + } + + ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode, + ctx.comp.comp.status, err); + switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + netdev_info(lif->netdev, "rx_filter add failed: VLAN %d\n", + ctx.cmd.rx_filter_add.vlan.vlan); + break; + case IONIC_RX_FILTER_MATCH_MAC: + netdev_info(lif->netdev, "rx_filter add failed: ADDR %pM\n", + ctx.cmd.rx_filter_add.mac.addr); + break; + } + + return err; + } + + switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + lif->nvlans++; + break; + case IONIC_RX_FILTER_MATCH_MAC: + if (is_multicast_ether_addr(ctx.cmd.rx_filter_add.mac.addr)) + lif->nmcast++; + else + lif->nucast++; + break; + } + + f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add); + if (f && f->state == IONIC_FILTER_STATE_OLD) { + /* Someone requested a delete while we were adding + * so update the filter info with the results from the add + * and the data will be there for the delete on the next + * sync cycle. 
+ */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_OLD); + } else { + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } + + spin_unlock_bh(&lif->rx_filters.lock); + + return err; +} + +int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }; + + memcpy(&ac.mac.addr, addr, ETH_ALEN); + + return ionic_lif_filter_add(lif, &ac); +} + +int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), + .vlan.vlan = cpu_to_le16(vid), + }; + + return ionic_lif_filter_add(lif, &ac); +} + +static int ionic_lif_filter_del(struct ionic_lif *lif, + struct ionic_rx_filter_add_cmd *ac) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .lif_index = cpu_to_le16(lif->index), + }, + }; + struct ionic_rx_filter *f; + int state; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + f = ionic_rx_filter_find(lif, ac); + if (!f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + switch (le16_to_cpu(ac->match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + netdev_dbg(lif->netdev, "%s: rx_filter del VLAN %d id %d\n", + __func__, ac->vlan.vlan, f->filter_id); + lif->nvlans--; + break; + case IONIC_RX_FILTER_MATCH_MAC: + netdev_dbg(lif->netdev, "%s: rx_filter del ADDR %pM id %d\n", + __func__, ac->mac.addr, f->filter_id); + if (is_multicast_ether_addr(ac->mac.addr) && lif->nmcast) + lif->nmcast--; + else if (!is_multicast_ether_addr(ac->mac.addr) && lif->nucast) + lif->nucast--; + break; + } + + state = f->state; + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); + ionic_rx_filter_free(lif, f); + + spin_unlock_bh(&lif->rx_filters.lock); + + if (state != IONIC_FILTER_STATE_NEW) { + err = ionic_adminq_post_wait(lif, &ctx); + if (err && err != -EEXIST) + return err; + } + + return 0; +} + +int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }; + + memcpy(&ac.mac.addr, addr, ETH_ALEN); + + return ionic_lif_filter_del(lif, &ac); +} + +int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid) +{ + struct ionic_rx_filter_add_cmd ac = { + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), + .vlan.vlan = cpu_to_le16(vid), + }; + + return ionic_lif_filter_del(lif, &ac); +} + struct sync_item { struct list_head list; struct ionic_rx_filter f; @@ -340,14 +577,14 @@ loop_out: * they can clear room for some new filters */ list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) { - (void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr); + (void)ionic_lif_filter_del(lif, &sync_item->f.cmd); list_del(&sync_item->list); devm_kfree(dev, sync_item); } list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) { - (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr); + (void)ionic_lif_filter_add(lif, &sync_item->f.cmd); list_del(&sync_item->list); devm_kfree(dev, sync_item); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h index a66e35f0833b..87b2666f248b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h @@ -44,5 +44,7 @@ struct ionic_rx_filter 
*ionic_rx_filter_rxsteer(struct ionic_lif *lif); void ionic_rx_filter_sync(struct ionic_lif *lif); int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode); int ionic_rx_filters_need_sync(struct ionic_lif *lif); +int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid); +int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid); #endif /* _IONIC_RX_FILTER_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c index c14de5fcedea..fd6806b4a1b9 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c @@ -151,33 +151,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = { IONIC_RX_STAT_DESC(vlan_stripped), }; -static const struct ionic_stat_desc ionic_txq_stats_desc[] = { - IONIC_TX_Q_STAT_DESC(stop), - IONIC_TX_Q_STAT_DESC(wake), - IONIC_TX_Q_STAT_DESC(drop), - IONIC_TX_Q_STAT_DESC(dbell_count), -}; - -static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = { - IONIC_CQ_STAT_DESC(compl_count), -}; - -static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = { - IONIC_INTR_STAT_DESC(rearm_count), -}; - -static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = { - IONIC_NAPI_STAT_DESC(poll_count), -}; #define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc) #define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc) #define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc) #define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc) -#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc) -#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc) -#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc) -#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc) #define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues) @@ -253,21 +231,6 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif) total += tx_queues * IONIC_NUM_TX_STATS; total += rx_queues * IONIC_NUM_RX_STATS; - if (test_bit(IONIC_LIF_F_UP, lif->state) && - test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) { - /* tx debug stats */ - total += tx_queues * (IONIC_NUM_DBG_CQ_STATS + - IONIC_NUM_TX_Q_STATS + - IONIC_NUM_DBG_INTR_STATS + - IONIC_MAX_NUM_SG_CNTR); - - /* rx debug stats */ - total += rx_queues * (IONIC_NUM_DBG_CQ_STATS + - IONIC_NUM_DBG_INTR_STATS + - IONIC_NUM_DBG_NAPI_STATS + - IONIC_MAX_NUM_NAPI_CNTR); - } - return total; } @@ -279,22 +242,6 @@ static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf, for (i = 0; i < IONIC_NUM_TX_STATS; i++) ethtool_sprintf(buf, "tx_%d_%s", q_num, ionic_tx_stats_desc[i].name); - - if (!test_bit(IONIC_LIF_F_UP, lif->state) || - !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) - return; - - for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) - ethtool_sprintf(buf, "txq_%d_%s", q_num, - ionic_txq_stats_desc[i].name); - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) - ethtool_sprintf(buf, "txq_%d_cq_%s", q_num, - ionic_dbg_cq_stats_desc[i].name); - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) - ethtool_sprintf(buf, "txq_%d_intr_%s", q_num, - ionic_dbg_intr_stats_desc[i].name); - for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) - ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i); } static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf, @@ -305,22 +252,6 @@ static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf, for (i = 0; i < IONIC_NUM_RX_STATS; i++) ethtool_sprintf(buf, "rx_%d_%s", q_num, 
ionic_rx_stats_desc[i].name); - - if (!test_bit(IONIC_LIF_F_UP, lif->state) || - !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) - return; - - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) - ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num, - ionic_dbg_cq_stats_desc[i].name); - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) - ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num, - ionic_dbg_intr_stats_desc[i].name); - for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) - ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num, - ionic_dbg_napi_stats_desc[i].name); - for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) - ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i); } static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf) @@ -350,7 +281,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf, int q_num) { struct ionic_tx_stats *txstats; - struct ionic_qcq *txqcq; int i; txstats = &lif->txqstats[q_num]; @@ -359,38 +289,12 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf, **buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]); (*buf)++; } - - if (!test_bit(IONIC_LIF_F_UP, lif->state) || - !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) - return; - - txqcq = lif->txqcqs[q_num]; - for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { - **buf = IONIC_READ_STAT64(&txqcq->q, - &ionic_txq_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { - **buf = IONIC_READ_STAT64(&txqcq->cq, - &ionic_dbg_cq_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { - **buf = IONIC_READ_STAT64(&txqcq->intr, - &ionic_dbg_intr_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { - **buf = txstats->sg_cntr[i]; - (*buf)++; - } } static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf, int q_num) { struct ionic_rx_stats *rxstats; - struct ionic_qcq *rxqcq; int i; rxstats = &lif->rxqstats[q_num]; @@ -399,31 +303,6 @@ static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf, **buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]); (*buf)++; } - - if (!test_bit(IONIC_LIF_F_UP, lif->state) || - !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) - return; - - rxqcq = lif->rxqcqs[q_num]; - for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { - **buf = IONIC_READ_STAT64(&rxqcq->cq, - &ionic_dbg_cq_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { - **buf = IONIC_READ_STAT64(&rxqcq->intr, - &ionic_dbg_intr_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { - **buf = IONIC_READ_STAT64(&rxqcq->napi_stats, - &ionic_dbg_napi_stats_desc[i]); - (*buf)++; - } - for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { - **buf = rxqcq->napi_stats.work_done_cntr[i]; - (*buf)++; - } } static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 37c39581b659..94384f5d2a22 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -14,8 +14,6 @@ static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) { - DEBUG_STATS_TXQ_POST(q, ring_dbell); - ionic_q_post(q, ring_dbell, cb_func, cb_arg); } @@ -23,8 +21,6 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) { ionic_q_post(q, ring_dbell, cb_func, cb_arg); - - 
DEBUG_STATS_RX_BUFF_CNT(q); } static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) @@ -507,8 +503,6 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) work_done, flags); } - DEBUG_STATS_NAPI_POLL(qcq, work_done); - return work_done; } @@ -546,8 +540,6 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) work_done, flags); } - DEBUG_STATS_NAPI_POLL(qcq, work_done); - return work_done; } @@ -591,9 +583,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) tx_work_done + rx_work_done, flags); } - DEBUG_STATS_NAPI_POLL(qcq, rx_work_done); - DEBUG_STATS_NAPI_POLL(qcq, tx_work_done); - return rx_work_done; } @@ -735,7 +724,6 @@ static void ionic_tx_clean(struct ionic_queue *q, } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) { netif_wake_subqueue(q->lif->netdev, qi); - q->wake++; } desc_info->bytes = skb->len; @@ -1174,7 +1162,6 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) if (unlikely(!ionic_q_has_space(q, ndescs))) { netif_stop_subqueue(q->lif->netdev, q->index); - q->stop++; stopped = 1; /* Might race with ionic_tx_clean, check again */ @@ -1269,7 +1256,6 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; err_out_drop: - q->stop++; q->drop++; dev_kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 344ea1143454..b4e094e6f58c 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -500,7 +500,7 @@ static int netxen_nic_set_mac(struct net_device *netdev, void *p) } memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); adapter->macaddr_set(adapter, addr->sa_data); if (netif_running(netdev)) { diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d58e021614cd..d613095b78e0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -23,6 +23,8 @@ #include <linux/qed/qed_if.h> #include "qed_debug.h" #include "qed_hsi.h" +#include "qed_dbg_hsi.h" +#include "qed_mfw_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; @@ -89,14 +91,14 @@ static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS) } #define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \ - ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \ + ((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \ ~((1 << (p_hwfn->cdev->cache_shift)) - 1)) -#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++) +#define for_each_hwfn(cdev, i) for (i = 0; i < (cdev)->num_hwfns; i++) #define D_TRINE(val, cond1, cond2, true1, true2, def) \ - (val == (cond1) ? true1 : \ - (val == (cond2) ? true2 : def)) + ((val) == (cond1) ? true1 : \ + ((val) == (cond2) ? 
true2 : def)) /* forward */ struct qed_ptt_pool; @@ -510,7 +512,7 @@ enum qed_hsi_def_type { struct qed_simd_fp_handler { void *token; - void (*func)(void *); + void (*func)(void *cookie); }; enum qed_slowpath_wq_flag { @@ -703,8 +705,6 @@ struct qed_dev { #define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev)) #define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH) #define QED_IS_K2(dev) QED_IS_AH(dev) -#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev)) -#define QED_IS_E5(dev) ((dev)->type == QED_DEV_TYPE_E5) u16 vendor_id; @@ -875,14 +875,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type); #define NUM_OF_BTB_BLOCKS(dev) \ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS) - /** - * @brief qed_concrete_to_sw_fid - get the sw function id from - * the concrete value. + * qed_concrete_to_sw_fid(): Get the sw function id from + * the concrete value. * - * @param concrete_fid + * @cdev: Qed dev pointer. + * @concrete_fid: Concrete fid. * - * @return inline u8 + * Return: inline u8. */ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, u32 concrete_fid) @@ -902,7 +902,6 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, } #define PKT_LB_TC 9 -#define MAX_NUM_VOQS_E4 20 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, @@ -914,7 +913,7 @@ int qed_device_num_engines(struct qed_dev *cdev); void qed_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, u8 *mac); -#define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) +#define QED_LEADING_HWFN(dev) (&(dev)->hwfns[0]) #define QED_IS_CMT(dev) ((dev)->num_hwfns > 1) /* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */ #define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin]) @@ -935,7 +934,7 @@ void qed_set_fw_mac_addr(__le16 *fw_msb, #define PQ_FLAGS_LLT (BIT(7)) #define PQ_FLAGS_MTC (BIT(8)) -/* physical queue index for cm context intialization */ +/* physical queue index for cm context initialization */ u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags); u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc); u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf); @@ -947,12 +946,18 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); void qed_db_recovery_execute(struct qed_hwfn *p_hwfn); bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); +#define GET_GTT_REG_ADDR(__base, __offset, __idx) \ + ((__base) + __offset ## _GTT_OFFSET((__idx))) + +#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \ + ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx))) + /* Other Linux specific common definitions */ #define DP_NAME(cdev) ((cdev)->name) -#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\ - (cdev->regview) + \ - (offset)) +#define REG_ADDR(cdev, offset) ((void __iomem *)((u8 __iomem *)\ + ((cdev)->regview) + \ + (offset))) #define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset)) #define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset)) @@ -960,7 +965,7 @@ bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); #define DOORBELL(cdev, db_addr, val) \ writel((u32)val, (void __iomem *)((u8 __iomem *)\ - (cdev->doorbells) + (db_addr))) + ((cdev)->doorbells) + (db_addr))) #define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ qed_device_num_ports((_p_hwfn)->cdev)) @@ -998,4 +1003,5 @@ int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port); void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 
src_port); void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port); void qed_llh_clear_all_filters(struct qed_dev *cdev); +unsigned long qed_get_epoch_time(void); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index cb0f2a3a1ac9..452494f8c298 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -54,22 +54,22 @@ /* connection context union */ union conn_context { - struct e4_core_conn_context core_ctx; - struct e4_eth_conn_context eth_ctx; - struct e4_iscsi_conn_context iscsi_ctx; - struct e4_fcoe_conn_context fcoe_ctx; - struct e4_roce_conn_context roce_ctx; + struct core_conn_context core_ctx; + struct eth_conn_context eth_ctx; + struct iscsi_conn_context iscsi_ctx; + struct fcoe_conn_context fcoe_ctx; + struct roce_conn_context roce_ctx; }; /* TYPE-0 task context - iSCSI, FCOE */ union type0_task_context { - struct e4_iscsi_task_context iscsi_ctx; - struct e4_fcoe_task_context fcoe_ctx; + struct iscsi_task_context iscsi_ctx; + struct fcoe_task_context fcoe_ctx; }; /* TYPE-1 task context - ROCE */ union type1_task_context { - struct e4_rdma_task_context roce_ctx; + struct rdma_task_context roce_ctx; }; struct src_ent { diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 8adb7ed0c12d..168ce2c50385 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -28,24 +28,23 @@ struct qed_tid_mem { }; /** - * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid + * qed_cxt_get_cid_info(): Returns the context info for a specific cidi. * + * @p_hwfn: HW device data. + * @p_info: In/out. * - * @param p_hwfn - * @param p_info in/out - * - * @return int + * Return: Int. */ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info); /** - * @brief qed_cxt_get_tid_mem_info + * qed_cxt_get_tid_mem_info(): Returns the tid mem info. * - * @param p_hwfn - * @param p_info + * @p_hwfn: HW device data. + * @p_info: in/out. * - * @return int + * Return: int. */ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, struct qed_tid_mem *p_info); @@ -64,142 +63,155 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *vf_cid); /** - * @brief qed_cxt_set_pf_params - Set the PF params for cxt init + * qed_cxt_set_pf_params(): Set the PF params for cxt init. + * + * @p_hwfn: HW device data. + * @rdma_tasks: Requested maximum. * - * @param p_hwfn - * @param rdma_tasks - requested maximum - * @return int + * Return: int. */ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks); /** - * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters + * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters. * - * @param p_hwfn - * @param last_line + * @p_hwfn: HW device data. + * @last_line: Last_line. * - * @return int + * Return: Int */ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line); /** - * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased + * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased. + * + * @p_hwfn: HW device data. + * @used_lines: Used lines. * - * @param p_hwfn - * @param used_lines + * Return: Int. 
*/ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines); /** - * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct + * qed_cxt_mngr_alloc(): Allocate and init the context manager struct. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_mngr_free + * qed_cxt_mngr_free() - Context manager free. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map + * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_mngr_setup - Reset the acquired CIDs + * qed_cxt_mngr_setup(): Reset the acquired CIDs. * - * @param p_hwfn + * @p_hwfn: HW device data. */ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path. - * + * qed_cxt_hw_init_common(): Initailze ILT and DQ, common phase, per path. * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path. + * qed_cxt_hw_init_pf(): Initailze ILT and DQ, PF phase, per path. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_qm_init_pf - Initailze the QM PF phase, per path + * qed_qm_init_pf(): Initailze the QM PF phase, per path. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @is_pf_loading: Is pf pending. * - * @param p_hwfn - * @param p_ptt - * @param is_pf_loading + * Return: Void. */ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool is_pf_loading); /** - * @brief Reconfigures QM pf on the fly + * qed_qm_reconf(): Reconfigures QM pf on the fly. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_CXT_PF_CID (0xff) /** - * @brief qed_cxt_release - Release a cid + * qed_cxt_release_cid(): Release a cid. * - * @param p_hwfn - * @param cid + * @p_hwfn: HW device data. + * @cid: Cid. + * + * Return: Void. */ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid); /** - * @brief qed_cxt_release - Release a cid belonging to a vf-queue + * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue. + * + * @p_hwfn: HW device data. + * @cid: Cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. * - * @param p_hwfn - * @param cid - * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF + * Return: Void. */ void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid); /** - * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type + * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type. * - * @param p_hwfn - * @param type - * @param p_cid + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. * - * @return int + * Return: Int. 
*/ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid); /** - * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type - * for a vf-queue + * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type + * for a vf-queue. * - * @param p_hwfn - * @param type - * @param p_cid - * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. * - * @return int + * Return: Int. */ int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid, u8 vfid); @@ -334,7 +346,10 @@ struct qed_cxt_mngr { /* Maximal number of L2 steering filters */ u32 arfs_count; - u8 task_type_id; + u16 iscsi_task_pages; + u16 fcoe_task_pages; + u16 roce_task_pages; + u16 eth_task_pages; u16 task_ctx_size; u16 conn_ctx_size; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h new file mode 100644 index 000000000000..9d5a0c9e1ca0 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h @@ -0,0 +1,1491 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. + */ +#ifndef _QED_DBG_HSI_H +#define _QED_DBG_HSI_H + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> + +/****************************************/ +/* Debug Tools HSI constants and macros */ +/****************************************/ + +enum block_id { + BLOCK_GRC, + BLOCK_MISCS, + BLOCK_MISC, + BLOCK_DBU, + BLOCK_PGLUE_B, + BLOCK_CNIG, + BLOCK_CPMU, + BLOCK_NCSI, + BLOCK_OPTE, + BLOCK_BMB, + BLOCK_PCIE, + BLOCK_MCP, + BLOCK_MCP2, + BLOCK_PSWHST, + BLOCK_PSWHST2, + BLOCK_PSWRD, + BLOCK_PSWRD2, + BLOCK_PSWWR, + BLOCK_PSWWR2, + BLOCK_PSWRQ, + BLOCK_PSWRQ2, + BLOCK_PGLCS, + BLOCK_DMAE, + BLOCK_PTU, + BLOCK_TCM, + BLOCK_MCM, + BLOCK_UCM, + BLOCK_XCM, + BLOCK_YCM, + BLOCK_PCM, + BLOCK_QM, + BLOCK_TM, + BLOCK_DORQ, + BLOCK_BRB, + BLOCK_SRC, + BLOCK_PRS, + BLOCK_TSDM, + BLOCK_MSDM, + BLOCK_USDM, + BLOCK_XSDM, + BLOCK_YSDM, + BLOCK_PSDM, + BLOCK_TSEM, + BLOCK_MSEM, + BLOCK_USEM, + BLOCK_XSEM, + BLOCK_YSEM, + BLOCK_PSEM, + BLOCK_RSS, + BLOCK_TMLD, + BLOCK_MULD, + BLOCK_YULD, + BLOCK_XYLD, + BLOCK_PRM, + BLOCK_PBF_PB1, + BLOCK_PBF_PB2, + BLOCK_RPB, + BLOCK_BTB, + BLOCK_PBF, + BLOCK_RDIF, + BLOCK_TDIF, + BLOCK_CDU, + BLOCK_CCFC, + BLOCK_TCFC, + BLOCK_IGU, + BLOCK_CAU, + BLOCK_UMAC, + BLOCK_XMAC, + BLOCK_MSTAT, + BLOCK_DBG, + BLOCK_NIG, + BLOCK_WOL, + BLOCK_BMBN, + BLOCK_IPC, + BLOCK_NWM, + BLOCK_NWS, + BLOCK_MS, + BLOCK_PHY_PCIE, + BLOCK_LED, + BLOCK_AVS_WRAP, + BLOCK_PXPREQBUS, + BLOCK_BAR0_MAP, + BLOCK_MCP_FIO, + BLOCK_LAST_INIT, + BLOCK_PRS_FC, + BLOCK_PBF_FC, + BLOCK_NIG_LB_FC, + BLOCK_NIG_LB_FC_PLLH, + BLOCK_NIG_TX_FC_PLLH, + BLOCK_NIG_TX_FC, + BLOCK_NIG_RX_FC_PLLH, + BLOCK_NIG_RX_FC, + MAX_BLOCK_ID +}; + +/* binary debug buffer types */ +enum bin_dbg_buffer_type { + BIN_BUF_DBG_MODE_TREE, + BIN_BUF_DBG_DUMP_REG, + BIN_BUF_DBG_DUMP_MEM, + BIN_BUF_DBG_IDLE_CHK_REGS, + BIN_BUF_DBG_IDLE_CHK_IMMS, + BIN_BUF_DBG_IDLE_CHK_RULES, + BIN_BUF_DBG_IDLE_CHK_PARSING_DATA, + BIN_BUF_DBG_ATTN_BLOCKS, + BIN_BUF_DBG_ATTN_REGS, + BIN_BUF_DBG_ATTN_INDEXES, + BIN_BUF_DBG_ATTN_NAME_OFFSETS, + BIN_BUF_DBG_BLOCKS, + BIN_BUF_DBG_BLOCKS_CHIP_DATA, + BIN_BUF_DBG_BUS_LINES, + 
BIN_BUF_DBG_BLOCKS_USER_DATA, + BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA, + BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS, + BIN_BUF_DBG_RESET_REGS, + BIN_BUF_DBG_PARSING_STRINGS, + MAX_BIN_DBG_BUFFER_TYPE +}; + +/* Attention bit mapping */ +struct dbg_attn_bit_mapping { + u16 data; +#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF +#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15 +}; + +/* Attention block per-type data */ +struct dbg_attn_block_type_data { + u16 names_offset; + u16 reserved1; + u8 num_regs; + u8 reserved2; + u16 regs_offset; + +}; + +/* Block attentions */ +struct dbg_attn_block { + struct dbg_attn_block_type_data per_type_data[2]; +}; + +/* Attention register result */ +struct dbg_attn_reg_result { + u32 data; +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 + u16 block_attn_offset; + u16 reserved; + u32 sts_val; + u32 mask_val; +}; + +/* Attention block result */ +struct dbg_attn_block_result { + u8 block_id; + u8 data; +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3 +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0 +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2 + u16 names_offset; + struct dbg_attn_reg_result reg_results[15]; +}; + +/* Mode header */ +struct dbg_mode_hdr { + u16 data; +#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 +#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 +#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF +#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1 +}; + +/* Attention register */ +struct dbg_attn_reg { + struct dbg_mode_hdr mode; + u16 block_attn_offset; + u32 data; +#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 +#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 + u32 sts_clr_address; + u32 mask_address; +}; + +/* Attention types */ +enum dbg_attn_type { + ATTN_TYPE_INTERRUPT, + ATTN_TYPE_PARITY, + MAX_DBG_ATTN_TYPE +}; + +/* Block debug data */ +struct dbg_block { + u8 name[15]; + u8 associated_storm_letter; +}; + +/* Chip-specific block debug data */ +struct dbg_block_chip { + u8 flags; +#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1 +#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4 +#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7 +#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5 + u8 dbg_client_id; + u8 reset_reg_id; + u8 reset_reg_bit_offset; + struct dbg_mode_hdr dbg_bus_mode; + u16 reserved1; + u8 reserved2; + u8 num_of_dbg_bus_lines; + u16 dbg_bus_lines_offset; + u32 dbg_select_reg_addr; + u32 dbg_dword_enable_reg_addr; + u32 dbg_shift_reg_addr; + u32 dbg_force_valid_reg_addr; + u32 dbg_force_frame_reg_addr; +}; + +/* Chip-specific block user debug data */ +struct dbg_block_chip_user { + u8 num_of_dbg_bus_lines; + u8 has_latency_events; + u16 names_offset; +}; + +/* Block user debug data */ +struct dbg_block_user { + u8 name[16]; +}; + +/* Block Debug line data */ +struct dbg_bus_line { + u8 data; +#define 
DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF +#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 +#define DBG_BUS_LINE_IS_256B_MASK 0x1 +#define DBG_BUS_LINE_IS_256B_SHIFT 4 +#define DBG_BUS_LINE_RESERVED_MASK 0x7 +#define DBG_BUS_LINE_RESERVED_SHIFT 5 + u8 group_sizes; +}; + +/* Condition header for registers dump */ +struct dbg_dump_cond_hdr { + struct dbg_mode_hdr mode; /* Mode header */ + u8 block_id; /* block ID */ + u8 data_size; /* size in dwords of the data following this header */ +}; + +/* Memory data for registers dump */ +struct dbg_dump_mem { + u32 dword0; +#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF +#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 +#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF +#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 + u32 dword1; +#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF +#define DBG_DUMP_MEM_LENGTH_SHIFT 0 +#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 +#define DBG_DUMP_MEM_RESERVED_MASK 0x7F +#define DBG_DUMP_MEM_RESERVED_SHIFT 25 +}; + +/* Register data for registers dump */ +struct dbg_dump_reg { + u32 data; +#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_DUMP_REG_ADDRESS_SHIFT 0 +#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 +#define DBG_DUMP_REG_LENGTH_MASK 0xFF +#define DBG_DUMP_REG_LENGTH_SHIFT 24 +}; + +/* Split header for registers dump */ +struct dbg_dump_split_hdr { + u32 hdr; +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 +}; + +/* Condition header for idle check */ +struct dbg_idle_chk_cond_hdr { + struct dbg_mode_hdr mode; /* Mode header */ + u16 data_size; /* size in dwords of the data following this header */ +}; + +/* Idle Check condition register */ +struct dbg_idle_chk_cond_reg { + u32 data; +#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 + u16 num_entries; + u8 entry_size; + u8 start_entry; +}; + +/* Idle Check info register */ +struct dbg_idle_chk_info_reg { + u32 data; +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 + u16 size; /* register size in dwords */ + struct dbg_mode_hdr mode; /* Mode header */ +}; + +/* Idle Check register */ +union dbg_idle_chk_reg { + struct dbg_idle_chk_cond_reg cond_reg; /* condition register */ + struct dbg_idle_chk_info_reg info_reg; /* info register */ +}; + +/* Idle Check result header */ +struct dbg_idle_chk_result_hdr { + u16 rule_id; /* Failing rule index */ + u16 mem_entry_id; /* Failing memory entry index */ + u8 num_dumped_cond_regs; /* number of dumped condition registers */ + u8 num_dumped_info_regs; /* number of dumped condition registers */ + u8 severity; /* from dbg_idle_chk_severity_types enum */ + u8 reserved; +}; + +/* Idle Check result register header */ +struct dbg_idle_chk_result_reg_hdr { + u8 data; +#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1 +#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0 +#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F +#define 
DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 + u8 start_entry; /* index of the first checked entry */ + u16 size; /* register size in dwords */ +}; + +/* Idle Check rule */ +struct dbg_idle_chk_rule { + u16 rule_id; /* Idle Check rule ID */ + u8 severity; /* value from dbg_idle_chk_severity_types enum */ + u8 cond_id; /* Condition ID */ + u8 num_cond_regs; /* number of condition registers */ + u8 num_info_regs; /* number of info registers */ + u8 num_imms; /* number of immediates in the condition */ + u8 reserved1; + u16 reg_offset; /* offset of this rules registers in the idle check + * register array (in dbg_idle_chk_reg units). + */ + u16 imm_offset; /* offset of this rules immediate values in the + * immediate values array (in dwords). + */ +}; + +/* Idle Check rule parsing data */ +struct dbg_idle_chk_rule_parsing_data { + u32 data; +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 +}; + +/* Idle check severity types */ +enum dbg_idle_chk_severity_types { + /* idle check failure should cause an error */ + IDLE_CHK_SEVERITY_ERROR, + /* idle check failure should cause an error only if theres no traffic */ + IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, + /* idle check failure should cause a warning */ + IDLE_CHK_SEVERITY_WARNING, + MAX_DBG_IDLE_CHK_SEVERITY_TYPES +}; + +/* Reset register */ +struct dbg_reset_reg { + u32 data; +#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF +#define DBG_RESET_REG_ADDR_SHIFT 0 +#define DBG_RESET_REG_IS_REMOVED_MASK 0x1 +#define DBG_RESET_REG_IS_REMOVED_SHIFT 24 +#define DBG_RESET_REG_RESERVED_MASK 0x7F +#define DBG_RESET_REG_RESERVED_SHIFT 25 +}; + +/* Debug Bus block data */ +struct dbg_bus_block_data { + u8 enable_mask; + u8 right_shift; + u8 force_valid_mask; + u8 force_frame_mask; + u8 dword_mask; + u8 line_num; + u8 hw_id; + u8 flags; +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1 +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0 +#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F +#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1 +}; + +enum dbg_bus_clients { + DBG_BUS_CLIENT_RBCN, + DBG_BUS_CLIENT_RBCP, + DBG_BUS_CLIENT_RBCR, + DBG_BUS_CLIENT_RBCT, + DBG_BUS_CLIENT_RBCU, + DBG_BUS_CLIENT_RBCF, + DBG_BUS_CLIENT_RBCX, + DBG_BUS_CLIENT_RBCS, + DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCZ, + DBG_BUS_CLIENT_OTHER_ENGINE, + DBG_BUS_CLIENT_TIMESTAMP, + DBG_BUS_CLIENT_CPU, + DBG_BUS_CLIENT_RBCY, + DBG_BUS_CLIENT_RBCQ, + DBG_BUS_CLIENT_RBCM, + DBG_BUS_CLIENT_RBCB, + DBG_BUS_CLIENT_RBCW, + DBG_BUS_CLIENT_RBCV, + MAX_DBG_BUS_CLIENTS +}; + +/* Debug Bus constraint operation types */ +enum dbg_bus_constraint_ops { + DBG_BUS_CONSTRAINT_OP_EQ, + DBG_BUS_CONSTRAINT_OP_NE, + DBG_BUS_CONSTRAINT_OP_LT, + DBG_BUS_CONSTRAINT_OP_LTC, + DBG_BUS_CONSTRAINT_OP_LE, + DBG_BUS_CONSTRAINT_OP_LEC, + DBG_BUS_CONSTRAINT_OP_GT, + DBG_BUS_CONSTRAINT_OP_GTC, + DBG_BUS_CONSTRAINT_OP_GE, + DBG_BUS_CONSTRAINT_OP_GEC, + MAX_DBG_BUS_CONSTRAINT_OPS +}; + +/* Debug Bus trigger state data */ +struct dbg_bus_trigger_state_data { + u8 msg_len; + u8 constraint_dword_mask; + u8 storm_id; + u8 reserved; +}; + +/* Debug Bus memory address */ +struct dbg_bus_mem_addr { + u32 lo; + u32 hi; +}; + +/* Debug Bus PCI buffer data */ +struct dbg_bus_pci_buf_data { + struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */ + struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */ + u32 size; /* PCI buffer size in bytes 
*/ +}; + +/* Debug Bus Storm EID range filter params */ +struct dbg_bus_storm_eid_range_params { + u8 min; /* Minimal event ID to filter on */ + u8 max; /* Maximal event ID to filter on */ +}; + +/* Debug Bus Storm EID mask filter params */ +struct dbg_bus_storm_eid_mask_params { + u8 val; /* Event ID value */ + u8 mask; /* Event ID mask. 1s in the mask = dont care bits. */ +}; + +/* Debug Bus Storm EID filter params */ +union dbg_bus_storm_eid_params { + struct dbg_bus_storm_eid_range_params range; + struct dbg_bus_storm_eid_mask_params mask; +}; + +/* Debug Bus Storm data */ +struct dbg_bus_storm_data { + u8 enabled; + u8 mode; + u8 hw_id; + u8 eid_filter_en; + u8 eid_range_not_mask; + u8 cid_filter_en; + union dbg_bus_storm_eid_params eid_filter_params; + u32 cid; +}; + +/* Debug Bus data */ +struct dbg_bus_data { + u32 app_version; + u8 state; + u8 mode_256b_en; + u8 num_enabled_blocks; + u8 num_enabled_storms; + u8 target; + u8 one_shot_en; + u8 grc_input_en; + u8 timestamp_input_en; + u8 filter_en; + u8 adding_filter; + u8 filter_pre_trigger; + u8 filter_post_trigger; + u8 trigger_en; + u8 filter_constraint_dword_mask; + u8 next_trigger_state; + u8 next_constraint_id; + struct dbg_bus_trigger_state_data trigger_states[3]; + u8 filter_msg_len; + u8 rcv_from_other_engine; + u8 blocks_dword_mask; + u8 blocks_dword_overlap; + u32 hw_id_mask; + struct dbg_bus_pci_buf_data pci_buf; + struct dbg_bus_block_data blocks[132]; + struct dbg_bus_storm_data storms[6]; +}; + +/* Debug bus states */ +enum dbg_bus_states { + DBG_BUS_STATE_IDLE, + DBG_BUS_STATE_READY, + DBG_BUS_STATE_RECORDING, + DBG_BUS_STATE_STOPPED, + MAX_DBG_BUS_STATES +}; + +/* Debug Bus Storm modes */ +enum dbg_bus_storm_modes { + DBG_BUS_STORM_MODE_PRINTF, + DBG_BUS_STORM_MODE_PRAM_ADDR, + DBG_BUS_STORM_MODE_DRA_RW, + DBG_BUS_STORM_MODE_DRA_W, + DBG_BUS_STORM_MODE_LD_ST_ADDR, + DBG_BUS_STORM_MODE_DRA_FSM, + DBG_BUS_STORM_MODE_FAST_DBGMUX, + DBG_BUS_STORM_MODE_RH, + DBG_BUS_STORM_MODE_RH_WITH_STORE, + DBG_BUS_STORM_MODE_FOC, + DBG_BUS_STORM_MODE_EXT_STORE, + MAX_DBG_BUS_STORM_MODES +}; + +/* Debug bus target IDs */ +enum dbg_bus_targets { + DBG_BUS_TARGET_ID_INT_BUF, + DBG_BUS_TARGET_ID_NIG, + DBG_BUS_TARGET_ID_PCI, + MAX_DBG_BUS_TARGETS +}; + +/* GRC Dump data */ +struct dbg_grc_data { + u8 params_initialized; + u8 reserved1; + u16 reserved2; + u32 param_val[48]; +}; + +/* Debug GRC params */ +enum dbg_grc_params { + DBG_GRC_PARAM_DUMP_TSTORM, + DBG_GRC_PARAM_DUMP_MSTORM, + DBG_GRC_PARAM_DUMP_USTORM, + DBG_GRC_PARAM_DUMP_XSTORM, + DBG_GRC_PARAM_DUMP_YSTORM, + DBG_GRC_PARAM_DUMP_PSTORM, + DBG_GRC_PARAM_DUMP_REGS, + DBG_GRC_PARAM_DUMP_RAM, + DBG_GRC_PARAM_DUMP_PBUF, + DBG_GRC_PARAM_DUMP_IOR, + DBG_GRC_PARAM_DUMP_VFC, + DBG_GRC_PARAM_DUMP_CM_CTX, + DBG_GRC_PARAM_DUMP_PXP, + DBG_GRC_PARAM_DUMP_RSS, + DBG_GRC_PARAM_DUMP_CAU, + DBG_GRC_PARAM_DUMP_QM, + DBG_GRC_PARAM_DUMP_MCP, + DBG_GRC_PARAM_DUMP_DORQ, + DBG_GRC_PARAM_DUMP_CFC, + DBG_GRC_PARAM_DUMP_IGU, + DBG_GRC_PARAM_DUMP_BRB, + DBG_GRC_PARAM_DUMP_BTB, + DBG_GRC_PARAM_DUMP_BMB, + DBG_GRC_PARAM_RESERVD1, + DBG_GRC_PARAM_DUMP_MULD, + DBG_GRC_PARAM_DUMP_PRS, + DBG_GRC_PARAM_DUMP_DMAE, + DBG_GRC_PARAM_DUMP_TM, + DBG_GRC_PARAM_DUMP_SDM, + DBG_GRC_PARAM_DUMP_DIF, + DBG_GRC_PARAM_DUMP_STATIC, + DBG_GRC_PARAM_UNSTALL, + DBG_GRC_PARAM_RESERVED2, + DBG_GRC_PARAM_MCP_TRACE_META_SIZE, + DBG_GRC_PARAM_EXCLUDE_ALL, + DBG_GRC_PARAM_CRASH, + DBG_GRC_PARAM_PARITY_SAFE, + DBG_GRC_PARAM_DUMP_CM, + DBG_GRC_PARAM_DUMP_PHY, + DBG_GRC_PARAM_NO_MCP, + DBG_GRC_PARAM_NO_FW_VER, + DBG_GRC_PARAM_RESERVED3, + 
DBG_GRC_PARAM_DUMP_MCP_HW_DUMP, + DBG_GRC_PARAM_DUMP_ILT_CDUC, + DBG_GRC_PARAM_DUMP_ILT_CDUT, + DBG_GRC_PARAM_DUMP_CAU_EXT, + MAX_DBG_GRC_PARAMS +}; + +/* Debug status codes */ +enum dbg_status { + DBG_STATUS_OK, + DBG_STATUS_APP_VERSION_NOT_SET, + DBG_STATUS_UNSUPPORTED_APP_VERSION, + DBG_STATUS_DBG_BLOCK_NOT_RESET, + DBG_STATUS_INVALID_ARGS, + DBG_STATUS_OUTPUT_ALREADY_SET, + DBG_STATUS_INVALID_PCI_BUF_SIZE, + DBG_STATUS_PCI_BUF_ALLOC_FAILED, + DBG_STATUS_PCI_BUF_NOT_ALLOCATED, + DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS, + DBG_STATUS_NO_MATCHING_FRAMING_MODE, + DBG_STATUS_VFC_READ_ERROR, + DBG_STATUS_STORM_ALREADY_ENABLED, + DBG_STATUS_STORM_NOT_ENABLED, + DBG_STATUS_BLOCK_ALREADY_ENABLED, + DBG_STATUS_BLOCK_NOT_ENABLED, + DBG_STATUS_NO_INPUT_ENABLED, + DBG_STATUS_NO_FILTER_TRIGGER_256B, + DBG_STATUS_FILTER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_NOT_ENABLED, + DBG_STATUS_CANT_ADD_CONSTRAINT, + DBG_STATUS_TOO_MANY_TRIGGER_STATES, + DBG_STATUS_TOO_MANY_CONSTRAINTS, + DBG_STATUS_RECORDING_NOT_STARTED, + DBG_STATUS_DATA_DIDNT_TRIGGER, + DBG_STATUS_NO_DATA_RECORDED, + DBG_STATUS_DUMP_BUF_TOO_SMALL, + DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED, + DBG_STATUS_UNKNOWN_CHIP, + DBG_STATUS_VIRT_MEM_ALLOC_FAILED, + DBG_STATUS_BLOCK_IN_RESET, + DBG_STATUS_INVALID_TRACE_SIGNATURE, + DBG_STATUS_INVALID_NVRAM_BUNDLE, + DBG_STATUS_NVRAM_GET_IMAGE_FAILED, + DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE, + DBG_STATUS_NVRAM_READ_FAILED, + DBG_STATUS_IDLE_CHK_PARSE_FAILED, + DBG_STATUS_MCP_TRACE_BAD_DATA, + DBG_STATUS_MCP_TRACE_NO_META, + DBG_STATUS_MCP_COULD_NOT_HALT, + DBG_STATUS_MCP_COULD_NOT_RESUME, + DBG_STATUS_RESERVED0, + DBG_STATUS_SEMI_FIFO_NOT_EMPTY, + DBG_STATUS_IGU_FIFO_BAD_DATA, + DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, + DBG_STATUS_FW_ASSERTS_PARSE_FAILED, + DBG_STATUS_REG_FIFO_BAD_DATA, + DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, + DBG_STATUS_DBG_ARRAY_NOT_SET, + DBG_STATUS_RESERVED1, + DBG_STATUS_NON_MATCHING_LINES, + DBG_STATUS_INSUFFICIENT_HW_IDS, + DBG_STATUS_DBG_BUS_IN_USE, + DBG_STATUS_INVALID_STORM_DBG_MODE, + DBG_STATUS_OTHER_ENGINE_BB_ONLY, + DBG_STATUS_FILTER_SINGLE_HW_ID, + DBG_STATUS_TRIGGER_SINGLE_HW_ID, + DBG_STATUS_MISSING_TRIGGER_STATE_STORM, + MAX_DBG_STATUS +}; + +/* Debug Storms IDs */ +enum dbg_storms { + DBG_TSTORM_ID, + DBG_MSTORM_ID, + DBG_USTORM_ID, + DBG_XSTORM_ID, + DBG_YSTORM_ID, + DBG_PSTORM_ID, + MAX_DBG_STORMS +}; + +/* Idle Check data */ +struct idle_chk_data { + u32 buf_size; + u8 buf_size_set; + u8 reserved1; + u16 reserved2; +}; + +struct pretend_params { + u8 split_type; + u8 reserved; + u16 split_id; +}; + +/* Debug Tools data (per HW function) + */ +struct dbg_tools_data { + struct dbg_grc_data grc; + struct dbg_bus_data bus; + struct idle_chk_data idle_chk; + u8 mode_enable[40]; + u8 block_in_reset[132]; + u8 chip_id; + u8 hw_type; + u8 num_ports; + u8 num_pfs_per_port; + u8 num_vfs; + u8 initialized; + u8 use_dmae; + u8 reserved; + struct pretend_params pretend; + u32 num_regs_read; +}; + +/* ILT Clients */ +enum ilt_clients { + ILT_CLI_CDUC, + ILT_CLI_CDUT, + ILT_CLI_QM, + ILT_CLI_TM, + ILT_CLI_SRC, + ILT_CLI_TSDM, + ILT_CLI_RGFS, + ILT_CLI_TGFS, + MAX_ILT_CLIENTS +}; + +/***************************** Public Functions *******************************/ + +/** + * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug + * arrays. + * + * @p_hwfn: HW device data. + * @bin_ptr: A pointer to the binary data with debug arrays. + * + * Return: enum dbg status. 
+ */ +enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr); + +/** + * qed_read_regs(): Reads registers into a buffer (using GRC). + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf: Destination buffer. + * @addr: Source GRC address in dwords. + * @len: Number of registers to read. + * + * Return: Void. + */ +void qed_read_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); + +/** + * qed_read_fw_info(): Reads FW info from the chip. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_info: (Out) a pointer to write the FW info into. + * + * Return: True if the FW info was read successfully from one of the Storms, + * or false if all Storms are in reset. + * + * The FW info contains FW-related information, such as the FW version, + * FW image (main/L2B/kuku), FW timestamp, etc. + * The FW info is read from the internal RAM of the first Storm that is not in + * reset. + */ +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info); +/** + * qed_dbg_grc_config(): Sets the value of a GRC parameter. + * + * @p_hwfn: HW device data. + * @grc_param: GRC parameter. + * @val: Value to set. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - Grc_param is invalid. + * - Val is outside the allowed boundaries. + */ +enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param, u32 val); + +/** + * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their + * default value. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); +/** + * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for + * GRC Dump. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump + * data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the collected GRC data into. + * @buf_size_in_dwords:Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified dump buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size + * for idle check results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the idle check + * data. + * + * return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_idle_chk_dump: Performs idle check and writes the results + * into the specified buffer. 
+ * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the idle check data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size + * for mcp trace results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the mcp trace data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * - The trace meta data cannot be read (from NVRAM or image file). + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size + * for grc trace fifo results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the reg fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size + * for the IGU fifo results. 
+ * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo + * data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the IGU fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * - The specified buffer is too small + * - DMAE transaction failed + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_protection_override_get_dump_buf_size(): Returns the required + * buffer size for protection override window results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for protection + * override data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status +qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); +/** + * qed_dbg_protection_override_dump(): Reads protection override window + * entries and writes the results into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the protection override data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * @return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); +/** + * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer + * size for FW Asserts results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); +/** + * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the FW Asserts data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * Otherwise, returns ok. 
+ */ +enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_read_attn(): Reads the attention registers of the specified + * block and type, and writes the results into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @block: Block ID. + * @attn_type: Attention type. + * @clear_status: Indicates if the attention status should be cleared. + * @results: (OUT) Pointer to write the read results into. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum block_id block, + enum dbg_attn_type attn_type, + bool clear_status, + struct dbg_attn_block_result *results); + +/** + * qed_dbg_print_attn(): Prints attention registers values in the + * specified results struct. + * + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results); + +/******************************* Data Types **********************************/ + +struct mcp_trace_format { + u32 data; +#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff +#define MCP_TRACE_FORMAT_MODULE_OFFSET 0 +#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 +#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16 +#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 +#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18 +#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 +#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20 +#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 +#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22 +#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 +#define MCP_TRACE_FORMAT_LEN_OFFSET 24 + + char *format_str; +}; + +/* MCP Trace Meta data structure */ +struct mcp_trace_meta { + u32 modules_num; + char **modules; + u32 formats_num; + struct mcp_trace_format *formats; + bool is_allocated; +}; + +/* Debug Tools user data */ +struct dbg_tools_user_data { + struct mcp_trace_meta mcp_trace_meta; + const u32 *mcp_trace_user_meta_buf; +}; + +/******************************** Constants **********************************/ + +#define MAX_NAME_LEN 16 + +/***************************** Public Functions *******************************/ + +/** + * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with + * debug arrays. + * + * @p_hwfn: HW device data. + * @bin_ptr: a pointer to the binary data with debug arrays. + * + * Return: dbg_status. + */ +enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr); + +/** + * qed_dbg_alloc_user_data(): Allocates user debug data. + * + * @p_hwfn: HW device data. + * @user_data_ptr: (OUT) a pointer to the allocated memory. + * + * Return: dbg_status. + */ +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, + void **user_data_ptr); + +/** + * qed_dbg_get_status_str(): Returns a string for the specified status. + * + * @status: A debug status code. + * + * Return: A string for the specified status. + */ +const char *qed_dbg_get_status_str(enum dbg_status status); + +/** + * qed_get_idle_chk_results_buf_size(): Returns the required buffer size + * for idle check results (in bytes). + * + * @p_hwfn: HW device data. 
+ * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); +/** + * qed_print_idle_chk_results(): Prints idle check results + * + * @p_hwfn: HW device data. + * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the idle check results. + * @num_errors: (OUT) number of errors found in idle check. + * @num_warnings: (OUT) number of warnings found in idle check. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf, + u32 *num_errors, + u32 *num_warnings); + +/** + * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data. + * + * @p_hwfn: HW device data. + * @meta_buf: Meta buffer. + * + * Return: Void. + * + * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to + * no NVRAM access). + */ +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf); + +/** + * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size + * for MCP Trace results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: MCP Trace dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Rrror if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_mcp_trace_results(): Prints MCP Trace results + * + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @num_dumped_dwords: Member of dwords that were dumped. + * @results_buf: Buffer for printing the mcp trace results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and + * keeps the MCP trace meta data allocated, to support continuous MCP Trace + * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should + * be called to free the meta data. + * + * @p_hwfn: HW device data. + * @dump_buf: MVP trace dump buffer, starting from the header. + * @results_buf: Buffer for printing the mcp trace results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf); + +/** + * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line + * + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @num_dumped_bytes: Number of bytes that were dumped. + * @results_buf: Buffer for printing the mcp trace results. + * + * Return: Error if the parsing fails, ok otherwise. 
+ */ +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, + u32 num_dumped_bytes, + char *results_buf); + +/** + * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data. + * Should be called after continuous MCP Trace parsing. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); + +/** + * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size + * for reg_fifo results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_reg_fifo_results(): Prints reg fifo results. + * + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size + * for igu_fifo results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_igu_fifo_results(): Prints IGU fifo results + * + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the IGU fifo results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_protection_override_results_buf_size(): Returns the required + * buffer size for protection override results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status +qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_protection_override_results(): Prints protection override + * results. + * + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. + * + * Return: Error if the parsing fails, ok otherwise. 
+ */ +enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size + * for FW Asserts results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_fw_asserts_results(): Prints FW Asserts results. + * + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer, starting from the header. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the FW Asserts results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_dbg_parse_attn(): Parses and prints attention registers values in + * the specified results struct. + * + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index e1798925b444..ea839e605577 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -84,16 +84,17 @@ struct qed_dcbx_mib_meta_data { extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; #ifdef CONFIG_DCB -int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *); +int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, + struct qed_dcbx_set *params); -int qed_dcbx_config_params(struct qed_hwfn *, - struct qed_ptt *, struct qed_dcbx_set *, bool); +int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_dcbx_set *params, bool hw_commit); #endif /* QED local interface routines */ int -qed_dcbx_mib_update_event(struct qed_hwfn *, - struct qed_ptt *, enum qed_mib_read_type); +qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_mib_read_type type); int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn); void qed_dcbx_info_free(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 6ab3e60d4928..e3edca187ddf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. 
*/ #include <linux/module.h> @@ -10,6 +10,7 @@ #include "qed.h" #include "qed_cxt.h" #include "qed_hsi.h" +#include "qed_dbg_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" @@ -121,6 +122,11 @@ static u32 cond0(const u32 *r, const u32 *imm) return (r[0] & ~r[1]) != imm[0]; } +static u32 cond14(const u32 *r, const u32 *imm) +{ + return (r[0] | imm[0]) != imm[1]; +} + static u32 cond1(const u32 *r, const u32 *imm) { return r[0] != imm[0]; @@ -172,6 +178,7 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = { cond11, cond12, cond13, + cond14, }; #define NUM_PHYS_BLOCKS 84 @@ -208,10 +215,61 @@ enum dbg_bus_frame_modes { DBG_BUS_NUM_FRAME_MODES }; +/* Debug bus SEMI frame modes */ +enum dbg_bus_semi_frame_modes { + DBG_BUS_SEMI_FRAME_MODE_4FAST = 0, /* 4 fast dw */ + DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */ + DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw,3 slow dw */ + DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3, /* 4 slow dw */ + DBG_BUS_SEMI_NUM_FRAME_MODES +}; + +/* Debug bus filter types */ +enum dbg_bus_filter_types { + DBG_BUS_FILTER_TYPE_OFF, /* Filter always off */ + DBG_BUS_FILTER_TYPE_PRE, /* Filter before trigger only */ + DBG_BUS_FILTER_TYPE_POST, /* Filter after trigger only */ + DBG_BUS_FILTER_TYPE_ON /* Filter always on */ +}; + +/* Debug bus pre-trigger recording types */ +enum dbg_bus_pre_trigger_types { + DBG_BUS_PRE_TRIGGER_FROM_ZERO, /* Record from time 0 */ + DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, /* Record some chunks before trigger */ + DBG_BUS_PRE_TRIGGER_DROP /* Drop data before trigger */ +}; + +/* Debug bus post-trigger recording types */ +enum dbg_bus_post_trigger_types { + DBG_BUS_POST_TRIGGER_RECORD, /* Start recording after trigger */ + DBG_BUS_POST_TRIGGER_DROP /* Drop data after trigger */ +}; + +/* Debug bus other engine mode */ +enum dbg_bus_other_engine_modes { + DBG_BUS_OTHER_ENGINE_MODE_NONE, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX +}; + +/* DBG block Framing mode definitions */ +struct framing_mode_defs { + u8 id; + u8 blocks_dword_mask; + u8 storms_dword_mask; + u8 semi_framing_mode_id; + u8 full_buf_thr; +}; + /* Chip constant definitions */ struct chip_defs { const char *name; + u8 dwords_per_cycle; + u8 num_framing_modes; u32 num_ilt_pages; + struct framing_mode_defs *framing_modes; }; /* HW type constant definitions */ @@ -334,7 +392,7 @@ struct split_type_defs { #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE #define FIELD_DWORD_OFFSET(type, field) \ - (int)(FIELD_BIT_OFFSET(type, field) / 32) + ((int)(FIELD_BIT_OFFSET(type, field) / 32)) #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32) #define FIELD_BIT_MASK(type, field) \ (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \ @@ -431,11 +489,13 @@ struct split_type_defs { #define STATIC_DEBUG_LINE_DWORDS 9 -#define NUM_COMMON_GLOBAL_PARAMS 9 +#define NUM_COMMON_GLOBAL_PARAMS 11 #define MAX_RECURSION_DEPTH 10 +#define FW_IMG_KUKU 0 #define FW_IMG_MAIN 1 +#define FW_IMG_L2B 2 #define REG_FIFO_ELEMENT_DWORDS 2 #define REG_FIFO_DEPTH_ELEMENTS 32 @@ -464,10 +524,25 @@ struct split_type_defs { /***************************** Constant Arrays *******************************/ +/* DBG block framing mode definitions, in descending preference order */ +static struct framing_mode_defs s_framing_mode_defs[4] = { + 
{DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf, + DBG_BUS_SEMI_FRAME_MODE_4FAST, + 10}, + {DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW, + 10}, + {DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc, + DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10}, + {DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8, + DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10} +}; + /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { - {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2}, - {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2} + {"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2, + s_framing_mode_defs}, + {"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2, + s_framing_mode_defs} }; /* Storm constant definitions array */ @@ -477,8 +552,8 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true, TSEM_REG_FAST_MEMORY, - TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2, + TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE, + TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG, TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT, TCM_REG_CTX_RBC_ACCS, {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX, @@ -491,10 +566,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false, MSEM_REG_FAST_MEMORY, - MSEM_REG_DBG_FRAME_MODE_BB_K2, - MSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - MSEM_REG_SLOW_DBG_MODE_BB_K2, - MSEM_REG_DBG_MODE1_CFG_BB_K2, + MSEM_REG_DBG_FRAME_MODE, + MSEM_REG_SLOW_DBG_ACTIVE, + MSEM_REG_SLOW_DBG_MODE, + MSEM_REG_DBG_MODE1_CFG, MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_DBG_GPRE_VECT, MCM_REG_CTX_RBC_ACCS, @@ -508,10 +583,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false, USEM_REG_FAST_MEMORY, - USEM_REG_DBG_FRAME_MODE_BB_K2, - USEM_REG_SLOW_DBG_ACTIVE_BB_K2, - USEM_REG_SLOW_DBG_MODE_BB_K2, - USEM_REG_DBG_MODE1_CFG_BB_K2, + USEM_REG_DBG_FRAME_MODE, + USEM_REG_SLOW_DBG_ACTIVE, + USEM_REG_SLOW_DBG_MODE, + USEM_REG_DBG_MODE1_CFG, USEM_REG_SYNC_DBG_EMPTY, USEM_REG_DBG_GPRE_VECT, UCM_REG_CTX_RBC_ACCS, @@ -525,10 +600,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false, XSEM_REG_FAST_MEMORY, - XSEM_REG_DBG_FRAME_MODE_BB_K2, - XSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - XSEM_REG_SLOW_DBG_MODE_BB_K2, - XSEM_REG_DBG_MODE1_CFG_BB_K2, + XSEM_REG_DBG_FRAME_MODE, + XSEM_REG_SLOW_DBG_ACTIVE, + XSEM_REG_SLOW_DBG_MODE, + XSEM_REG_DBG_MODE1_CFG, XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_DBG_GPRE_VECT, XCM_REG_CTX_RBC_ACCS, @@ -541,10 +616,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false, YSEM_REG_FAST_MEMORY, - YSEM_REG_DBG_FRAME_MODE_BB_K2, - YSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - YSEM_REG_SLOW_DBG_MODE_BB_K2, - YSEM_REG_DBG_MODE1_CFG_BB_K2, + YSEM_REG_DBG_FRAME_MODE, + YSEM_REG_SLOW_DBG_ACTIVE, + YSEM_REG_SLOW_DBG_MODE, + YSEM_REG_DBG_MODE1_CFG, YSEM_REG_SYNC_DBG_EMPTY, YSEM_REG_DBG_GPRE_VECT, YCM_REG_CTX_RBC_ACCS, @@ -558,10 +633,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true, PSEM_REG_FAST_MEMORY, - PSEM_REG_DBG_FRAME_MODE_BB_K2, - PSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - PSEM_REG_SLOW_DBG_MODE_BB_K2, - PSEM_REG_DBG_MODE1_CFG_BB_K2, + PSEM_REG_DBG_FRAME_MODE, + PSEM_REG_SLOW_DBG_ACTIVE, + PSEM_REG_SLOW_DBG_MODE, + PSEM_REG_DBG_MODE1_CFG, PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_DBG_GPRE_VECT, PCM_REG_CTX_RBC_ACCS, @@ -575,7 +650,8 @@ static struct hw_type_defs s_hw_type_defs[] = { 
{"asic", 1, 256, 32768}, {"reserved", 0, 0, 0}, {"reserved2", 0, 0, 0}, - {"reserved3", 0, 0, 0} + {"reserved3", 0, 0, 0}, + {"reserved4", 0, 0, 0} }; static struct grc_param_defs s_grc_param_defs[] = { @@ -772,25 +848,25 @@ static struct rbc_reset_defs s_rbc_reset_defs[] = { static struct phy_defs s_phy_defs[] = { {"nw_phy", NWS_REG_NWS_CMU_K2, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, - PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, - PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5}, - {"sgmii_phy", MS_REG_MS_CMU_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, - {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, - {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2}, + {"sgmii_phy", MS_REG_MS_CMU_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2}, + {"pcie_phy0", PHY_PCIE_REG_PHY0_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, + {"pcie_phy1", PHY_PCIE_REG_PHY1_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, }; static struct split_type_defs s_split_type_defs[] = { @@ -810,8 +886,17 @@ static struct split_type_defs s_split_type_defs[] = { {"vf"} }; +/******************************** Variables **********************************/ + +/* The version of the calling app */ +static u32 s_app_ver; + /**************************** Private Functions ******************************/ +static void qed_static_asserts(void) +{ +} + /* Reads and returns a single dword from the specified unaligned buffer */ static u32 qed_read_unaligned_dword(u8 *buf) { @@ -870,6 +955,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn) if (dev_data->initialized) return DBG_STATUS_OK; + if (!s_app_ver) + return DBG_STATUS_APP_VERSION_NOT_SET; + /* Set chip */ if (QED_IS_K2(p_hwfn->cdev)) { dev_data->chip_id = CHIP_K2; @@ -990,11 +1078,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); - /* qed_rq() fetches data in CPU byteorder. Swap it back to - * the device's to get right structure layout. - */ - cpu_to_le32_array(dest, size); - /* Read FW version info from Storm RAM */ size = le32_to_cpu(fw_info_location.size); if (!size || size > sizeof(*fw_info)) @@ -1006,8 +1089,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); - - cpu_to_le32_array(dest, size); } /* Dumps the specified string to the specified buffer. 
@@ -1117,9 +1198,15 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Unexpected debug error: invalid FW version string\n"); switch (fw_info.ver.image_id) { + case FW_IMG_KUKU: + strcpy(fw_img_str, "kuku"); + break; case FW_IMG_MAIN: strcpy(fw_img_str, "main"); break; + case FW_IMG_L2B: + strcpy(fw_img_str, "l2b"); + break; default: strcpy(fw_img_str, "unknown"); break; @@ -1255,6 +1342,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, s_hw_type_defs[dev_data->hw_type].name); offset += qed_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id); + offset += qed_dump_num_param(dump_buf + offset, + dump, "epoch", qed_get_epoch_time()); if (dev_data->chip_id == CHIP_BB) offset += qed_dump_num_param(dump_buf + offset, dump, "path", QED_PATH_ID(p_hwfn)); @@ -1590,7 +1679,7 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn, continue; reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_STALL_0_BB_K2; + SEM_FAST_REG_STALL_0; qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0); } @@ -1703,8 +1792,8 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; const struct dbg_attn_reg *attn_reg_arr; + u32 block_id, sts_clr_address; u8 reg_idx, num_attn_regs; - u32 block_id; for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { if (dev_data->block_in_reset[block_id]) @@ -1728,16 +1817,103 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); + sts_clr_address = reg_data->sts_clr_address; /* If Mode match: clear parity status */ if (!eval_mode || qed_is_mode_match(p_hwfn, &modes_buf_offset)) qed_rd(p_hwfn, p_ptt, - DWORDS_TO_BYTES(reg_data-> - sts_clr_address)); + DWORDS_TO_BYTES(sts_clr_address)); } } } +/* Finds the meta data image in NVRAM */ +static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 image_type, + u32 *nvram_offset_bytes, + u32 *nvram_size_bytes) +{ + u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; + struct mcp_file_att file_att; + int nvm_result; + + /* Call NVRAM get file command */ + nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, + p_ptt, + DRV_MSG_CODE_NVM_GET_FILE_ATT, + image_type, + &ret_mcp_resp, + &ret_mcp_param, + &ret_txn_size, + (u32 *)&file_att, false); + + /* Check response */ + if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != + FW_MSG_CODE_NVM_OK) + return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; + + /* Update return values */ + *nvram_offset_bytes = file_att.nvm_start_addr; + *nvram_size_bytes = file_att.len; + + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", + image_type, *nvram_offset_bytes, *nvram_size_bytes); + + /* Check alignment */ + if (*nvram_size_bytes & 0x3) + return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; + + return DBG_STATUS_OK; +} + +/* Reads data from NVRAM */ +static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 nvram_offset_bytes, + u32 nvram_size_bytes, u32 *ret_buf) +{ + u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; + s32 bytes_left = nvram_size_bytes; + u32 read_offset = 0, param = 0; + + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "nvram_read: reading image of size %d bytes from NVRAM\n", + nvram_size_bytes); + + do { + bytes_to_copy = + (bytes_left > + MCP_DRV_NVM_BUF_LEN) ? 
MCP_DRV_NVM_BUF_LEN : bytes_left; + + /* Call NVRAM read command */ + SET_MFW_FIELD(param, + DRV_MB_PARAM_NVM_OFFSET, + nvram_offset_bytes + read_offset); + SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy); + if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NVM_READ_NVRAM, param, + &ret_mcp_resp, + &ret_mcp_param, &ret_read_size, + (u32 *)((u8 *)ret_buf + read_offset), + false)) + return DBG_STATUS_NVRAM_READ_FAILED; + + /* Check response */ + if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) + return DBG_STATUS_NVRAM_READ_FAILED; + + /* Update read offset */ + read_offset += ret_read_size; + bytes_left -= ret_read_size; + } while (bytes_left > 0); + + return DBG_STATUS_OK; +} + /* Dumps GRC registers section header. Returns the dumped size in dwords. * the following parameters are dumped: * - count: no. of dumped entries @@ -3189,17 +3365,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, return offset; } -static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 image_type, - u32 *nvram_offset_bytes, - u32 *nvram_size_bytes); - -static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 nvram_offset_bytes, - u32 nvram_size_bytes, u32 *ret_buf); - /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3283,10 +3448,6 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, has_dbg_bus = GET_FIELD(block_per_chip->flags, DBG_BLOCK_CHIP_HAS_DBG_BUS); - /* read+clear for NWS parity is not working, skip NWS block */ - if (block_id == BLOCK_NWS) - continue; - if (!is_removed && has_dbg_bus && GET_FIELD(block_per_chip->dbg_bus_mode.data, DBG_MODE_HDR_EVAL_MODE) > 0) { @@ -3375,8 +3536,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, bool dump, u32 *num_dumped_dwords) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 dwords_read, offset = 0; bool parities_masked = false; + u32 dwords_read, offset = 0; u8 i; *num_dumped_dwords = 0; @@ -3545,8 +3706,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, */ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 * - dump_buf, + u32 *dump_buf, bool dump, u16 rule_id, const struct dbg_idle_chk_rule *rule, @@ -3894,91 +4054,6 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, return offset; } -/* Finds the meta data image in NVRAM */ -static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 image_type, - u32 *nvram_offset_bytes, - u32 *nvram_size_bytes) -{ - u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; - struct mcp_file_att file_att; - int nvm_result; - - /* Call NVRAM get file command */ - nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, - p_ptt, - DRV_MSG_CODE_NVM_GET_FILE_ATT, - image_type, - &ret_mcp_resp, - &ret_mcp_param, - &ret_txn_size, (u32 *)&file_att); - - /* Check response */ - if (nvm_result || - (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) - return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; - - /* Update return values */ - *nvram_offset_bytes = file_att.nvm_start_addr; - *nvram_size_bytes = file_att.len; - - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", - image_type, *nvram_offset_bytes, *nvram_size_bytes); - - /* Check alignment */ - if (*nvram_size_bytes & 0x3) - return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; - - 
return DBG_STATUS_OK; -} - -/* Reads data from NVRAM */ -static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 nvram_offset_bytes, - u32 nvram_size_bytes, u32 *ret_buf) -{ - u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; - s32 bytes_left = nvram_size_bytes; - u32 read_offset = 0, param = 0; - - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "nvram_read: reading image of size %d bytes from NVRAM\n", - nvram_size_bytes); - - do { - bytes_to_copy = - (bytes_left > - MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left; - - /* Call NVRAM read command */ - SET_MFW_FIELD(param, - DRV_MB_PARAM_NVM_OFFSET, - nvram_offset_bytes + read_offset); - SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy); - if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, - DRV_MSG_CODE_NVM_READ_NVRAM, param, - &ret_mcp_resp, - &ret_mcp_param, &ret_read_size, - (u32 *)((u8 *)ret_buf + read_offset))) - return DBG_STATUS_NVRAM_READ_FAILED; - - /* Check response */ - if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) - return DBG_STATUS_NVRAM_READ_FAILED; - - /* Update read offset */ - read_offset += ret_read_size; - bytes_left -= ret_read_size; - } while (bytes_left > 0); - - return DBG_STATUS_OK; -} - /* Get info on the MCP Trace data in the scratchpad: * - trace_data_grc_addr (OUT): trace data GRC address in bytes * - trace_data_size (OUT): trace data size in bytes (without the header) @@ -4480,14 +4555,18 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, /* Dumps the specified ILT pages to the specified buffer. * Returns the dumped size in dwords. */ -static u32 qed_ilt_dump_pages_range(u32 *dump_buf, - bool dump, - u32 start_page_id, +static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset, + bool *dump, u32 start_page_id, u32 num_pages, struct phys_mem_desc *ilt_pages, - bool dump_page_ids) + bool dump_page_ids, u32 buf_size_in_dwords, + u32 *given_actual_dump_size_in_dwords) { - u32 page_id, end_page_id, offset = 0; + u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords; + u32 page_id, end_page_id, offset = *given_offset; + struct phys_mem_desc *mem_desc = NULL; + bool continue_dump = *dump; + u32 partial_page_size = 0; if (num_pages == 0) return offset; @@ -4495,31 +4574,51 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf, end_page_id = start_page_id + num_pages - 1; for (page_id = start_page_id; page_id <= end_page_id; page_id++) { - struct phys_mem_desc *mem_desc = &ilt_pages[page_id]; - - /** - * - * if (page_id >= ->p_cxt_mngr->ilt_shadow_size) - * break; - */ - + mem_desc = &ilt_pages[page_id]; if (!ilt_pages[page_id].virt_addr) continue; if (dump_page_ids) { - /* Copy page ID to dump buffer */ - if (dump) + /* Copy page ID to dump buffer + * (if dump is needed and buffer is not full) + */ + if ((continue_dump) && + (offset + 1 > buf_size_in_dwords)) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + if (continue_dump) *(dump_buf + offset) = page_id; offset++; } else { /* Copy page memory to dump buffer */ - if (dump) + if ((continue_dump) && + (offset + BYTES_TO_DWORDS(mem_desc->size) > + buf_size_in_dwords)) { + if (offset + BYTES_TO_DWORDS(mem_desc->size) > + buf_size_in_dwords) { + partial_page_size = + buf_size_in_dwords - offset; + memcpy(dump_buf + offset, + mem_desc->virt_addr, + partial_page_size); + continue_dump = false; + actual_dump_size_in_dwords = + offset + partial_page_size; + } + } + + if (continue_dump) memcpy(dump_buf + offset, mem_desc->virt_addr, mem_desc->size); offset += 
BYTES_TO_DWORDS(mem_desc->size); } } + *dump = continue_dump; + *given_offset = offset; + *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords; + return offset; } @@ -4528,21 +4627,30 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf, */ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, u32 *dump_buf, - bool dump, + u32 *given_offset, + bool *dump, u32 valid_conn_pf_pages, u32 valid_conn_vf_pages, struct phys_mem_desc *ilt_pages, - bool dump_page_ids) + bool dump_page_ids, + u32 buf_size_in_dwords, + u32 *given_actual_dump_size_in_dwords) { struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; - u32 pf_start_line, start_page_id, offset = 0; + u32 pf_start_line, start_page_id, offset = *given_offset; u32 cdut_pf_init_pages, cdut_vf_init_pages; u32 cdut_pf_work_pages, cdut_vf_work_pages; u32 base_data_offset, size_param_offset; + u32 src_pages; + u32 section_header_and_param_size; u32 cdut_pf_pages, cdut_vf_pages; + u32 actual_dump_size_in_dwords; + bool continue_dump = *dump; + bool update_size = *dump; const char *section_name; - u8 i; + u32 i; + actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords; section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem"; cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn); cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn); @@ -4551,13 +4659,26 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages; cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages; pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line; + section_header_and_param_size = qed_dump_section_hdr(NULL, + false, + section_name, + 1) + + qed_dump_num_param(NULL, false, "size", 0); + + if ((continue_dump) && + (offset + section_header_and_param_size > buf_size_in_dwords)) { + continue_dump = false; + update_size = false; + actual_dump_size_in_dwords = offset; + } - offset += - qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1); + offset += qed_dump_section_hdr(dump_buf + offset, + continue_dump, section_name, 1); /* Dump size parameter (0 for now, overwritten with real size later) */ size_param_offset = offset; - offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); + offset += qed_dump_num_param(dump_buf + offset, + continue_dump, "size", 0); base_data_offset = offset; /* CDUC pages are ordered as follows: @@ -4570,22 +4691,22 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) { /* Dump connection PF pages */ start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line; - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - valid_conn_pf_pages, - ilt_pages, dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, valid_conn_pf_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); /* Dump connection VF pages */ start_page_id += clients[ILT_CLI_CDUC].pf_total_lines; for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines) - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - valid_conn_vf_pages, - ilt_pages, - dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, + &continue_dump, start_page_id, + valid_conn_vf_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); } /* CDUT pages are ordered as follows: @@ -4599,63 +4720,84 @@ static u32 
qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, /* Dump task PF pages */ start_page_id = clients[ILT_CLI_CDUT].first.val + cdut_pf_init_pages - pf_start_line; - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - cdut_pf_work_pages, - ilt_pages, dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, cdut_pf_work_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); /* Dump task VF pages */ start_page_id = clients[ILT_CLI_CDUT].first.val + cdut_pf_pages + cdut_vf_init_pages - pf_start_line; for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; i++, start_page_id += cdut_vf_pages) - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - cdut_vf_work_pages, - ilt_pages, - dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, + &continue_dump, start_page_id, + cdut_vf_work_pages, ilt_pages, + dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); + } + + /*Dump Searcher pages */ + if (clients[ILT_CLI_SRC].active) { + start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line; + src_pages = clients[ILT_CLI_SRC].last.val - + clients[ILT_CLI_SRC].first.val + 1; + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, src_pages, ilt_pages, + dump_page_ids, buf_size_in_dwords, + &actual_dump_size_in_dwords); } /* Overwrite size param */ - if (dump) - qed_dump_num_param(dump_buf + size_param_offset, - dump, "size", offset - base_data_offset); + if (update_size) { + u32 section_size = (*dump == continue_dump) ? + offset - base_data_offset : + actual_dump_size_in_dwords - base_data_offset; + if (section_size > 0) + qed_dump_num_param(dump_buf + size_param_offset, + *dump, "size", section_size); + else if ((section_size == 0) && (*dump != continue_dump)) + actual_dump_size_in_dwords -= + section_header_and_param_size; + } + + *dump = continue_dump; + *given_offset = offset; + *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords; return offset; } -/* Performs ILT Dump to the specified buffer. +/* Dumps a section containing the global parameters. + * Part of ilt dump process * Returns the dumped size in dwords. 
*/ -static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +static u32 +qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u32 cduc_page_size, + u32 conn_ctx_size, + u32 cdut_page_size, + u32 *full_dump_size_param_offset, + u32 *actual_dump_size_param_offset) { struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; - u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0; - u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages; - u32 num_cids_per_page, conn_ctx_size; - u32 cduc_page_size, cdut_page_size; - struct phys_mem_desc *ilt_pages; - u8 conn_type; - - cduc_page_size = 1 << - (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); - cdut_page_size = 1 << - (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); - conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size; - num_cids_per_page = (int)(cduc_page_size / conn_ctx_size); - ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow; + u32 offset = 0; - /* Dump global params - 22 must match number of params below */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, - dump_buf + offset, dump, 22); + dump_buf + offset, + dump, 30); offset += qed_dump_str_param(dump_buf + offset, - dump, "dump-type", "ilt-dump"); + dump, + "dump-type", "ilt-dump"); offset += qed_dump_num_param(dump_buf + offset, dump, - "cduc-page-size", cduc_page_size); + "cduc-page-size", + cduc_page_size); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-first-page-id", @@ -4667,20 +4809,19 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-num-pf-pages", - clients - [ILT_CLI_CDUC].pf_total_lines); + clients[ILT_CLI_CDUC].pf_total_lines); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-num-vf-pages", - clients - [ILT_CLI_CDUC].vf_total_lines); + clients[ILT_CLI_CDUC].vf_total_lines); offset += qed_dump_num_param(dump_buf + offset, dump, "max-conn-ctx-size", conn_ctx_size); offset += qed_dump_num_param(dump_buf + offset, dump, - "cdut-page-size", cdut_page_size); + "cdut-page-size", + cdut_page_size); offset += qed_dump_num_param(dump_buf + offset, dump, "cdut-first-page-id", @@ -4711,19 +4852,16 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, p_hwfn->p_cxt_mngr->task_ctx_size); offset += qed_dump_num_param(dump_buf + offset, dump, - "task-type-id", - p_hwfn->p_cxt_mngr->task_type_id); - offset += qed_dump_num_param(dump_buf + offset, - dump, "first-vf-id-in-pf", p_hwfn->p_cxt_mngr->first_vf_in_pf); - offset += /* 18 */ qed_dump_num_param(dump_buf + offset, - dump, - "num-vfs-in-pf", - p_hwfn->p_cxt_mngr->vf_count); offset += qed_dump_num_param(dump_buf + offset, dump, - "ptr-size-bytes", sizeof(void *)); + "num-vfs-in-pf", + p_hwfn->p_cxt_mngr->vf_count); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "ptr-size-bytes", + sizeof(void *)); offset += qed_dump_num_param(dump_buf + offset, dump, "pf-start-line", @@ -4736,58 +4874,281 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, dump, "ilt-shadow-size", p_hwfn->p_cxt_mngr->ilt_shadow_size); + + *full_dump_size_param_offset = offset; + + offset += qed_dump_num_param(dump_buf + offset, + dump, "dump-size-full", 0); + + *actual_dump_size_param_offset = offset; + + offset += qed_dump_num_param(dump_buf + offset, + dump, + "dump-size-actual", 0); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "iscsi_task_pages", + p_hwfn->p_cxt_mngr->iscsi_task_pages); + offset += 
qed_dump_num_param(dump_buf + offset, + dump, + "fcoe_task_pages", + p_hwfn->p_cxt_mngr->fcoe_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "roce_task_pages", + p_hwfn->p_cxt_mngr->roce_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "eth_task_pages", + p_hwfn->p_cxt_mngr->eth_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-first-page-id", + clients[ILT_CLI_SRC].first.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-last-page-id", + clients[ILT_CLI_SRC].last.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-is-active", + clients[ILT_CLI_SRC].active); + /* Additional/Less parameters require matching of number in call to * dump_common_global_params() */ - /* Dump section containing number of PF CIDs per connection type */ + return offset; +} + +/* Dump section containing number of PF CIDs per connection type. + * Part of ilt dump process. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + bool dump, u32 *valid_conn_pf_cids) +{ + u32 num_pf_cids = 0; + u32 offset = 0; + u8 conn_type; + offset += qed_dump_section_hdr(dump_buf + offset, dump, "num_pf_cids_per_conn_type", 1); offset += qed_dump_num_param(dump_buf + offset, - dump, "size", NUM_OF_CONNECTION_TYPES_E4); - for (conn_type = 0, valid_conn_pf_cids = 0; - conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) { - u32 num_pf_cids = - p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count; - + dump, "size", NUM_OF_CONNECTION_TYPES); + for (conn_type = 0, *valid_conn_pf_cids = 0; + conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) { + num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count; if (dump) *(dump_buf + offset) = num_pf_cids; - valid_conn_pf_cids += num_pf_cids; + *valid_conn_pf_cids += num_pf_cids; } - /* Dump section containing number of VF CIDs per connection type */ - offset += qed_dump_section_hdr(dump_buf + offset, - dump, "num_vf_cids_per_conn_type", 1); + return offset; +} + +/* Dump section containing number of VF CIDs per connection type + * Part of ilt dump process. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + bool dump, u32 *valid_conn_vf_cids) +{ + u32 num_vf_cids = 0; + u32 offset = 0; + u8 conn_type; + + offset += qed_dump_section_hdr(dump_buf + offset, dump, + "num_vf_cids_per_conn_type", 1); offset += qed_dump_num_param(dump_buf + offset, - dump, "size", NUM_OF_CONNECTION_TYPES_E4); - for (conn_type = 0, valid_conn_vf_cids = 0; - conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) { - u32 num_vf_cids = + dump, "size", NUM_OF_CONNECTION_TYPES); + for (conn_type = 0, *valid_conn_vf_cids = 0; + conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) { + num_vf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf; - if (dump) *(dump_buf + offset) = num_vf_cids; - valid_conn_vf_cids += num_vf_cids; + *valid_conn_vf_cids += num_vf_cids; + } + + return offset; +} + +/* Performs ILT Dump to the specified buffer. + * buf_size_in_dwords - The dumped buffer size. + * Returns the dumped size in dwords. 
+ */ +static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, u32 buf_size_in_dwords, bool dump) +{ +#if ((!defined VMWARE) && (!defined UEFI)) + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; +#endif + u32 valid_conn_vf_cids = 0, + valid_conn_vf_pages, offset = 0, real_dumped_size = 0; + u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages; + u32 num_cids_per_page, conn_ctx_size; + u32 cduc_page_size, cdut_page_size; + u32 actual_dump_size_in_dwords = 0; + struct phys_mem_desc *ilt_pages; + u32 actul_dump_off = 0; + u32 last_section_size; + u32 full_dump_off = 0; + u32 section_size = 0; + bool continue_dump; + u32 page_id; + + last_section_size = qed_dump_last_section(NULL, 0, false); + cduc_page_size = 1 << + (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); + cdut_page_size = 1 << + (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); + conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size; + num_cids_per_page = (int)(cduc_page_size / conn_ctx_size); + ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow; + continue_dump = dump; + + /* if need to dump then save memory for the last section + * (last section calculates CRC of dumped data) + */ + if (dump) { + if (buf_size_in_dwords >= last_section_size) { + buf_size_in_dwords -= last_section_size; + } else { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } } - /* Dump section containing physical memory descs for each ILT page */ + /* Dump global params */ + + /* if need to dump then first check that there is enough memory + * in dumped buffer for this section calculate the size of this + * section without dumping. if there is not enough memory - then + * stop the dumping. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_common_global_params(p_hwfn, + p_ptt, + NULL, + false, + cduc_page_size, + conn_ctx_size, + cdut_page_size, + &full_dump_off, + &actul_dump_off); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, + continue_dump, + cduc_page_size, + conn_ctx_size, + cdut_page_size, + &full_dump_off, + &actul_dump_off); + + /* Dump section containing number of PF CIDs per connection type + * If need to dump then first check that there is enough memory in + * dumped buffer for this section. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_num_pf_cids(p_hwfn, + NULL, + false, + &valid_conn_pf_cids); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn, + dump_buf + offset, + continue_dump, + &valid_conn_pf_cids); + + /* Dump section containing number of VF CIDs per connection type + * If need to dump then first check that there is enough memory in + * dumped buffer for this section. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_num_vf_cids(p_hwfn, + NULL, + false, + &valid_conn_vf_cids); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn, + dump_buf + offset, + continue_dump, + &valid_conn_vf_cids); + + /* Dump section containing physical memory descriptors for each + * ILT page. 
+ */ num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size; + + /* If need to dump then first check that there is enough memory + * in dumped buffer for the section header. + */ + if (continue_dump) { + section_size = qed_dump_section_hdr(NULL, + false, + "ilt_page_desc", + 1) + + qed_dump_num_param(NULL, + false, + "size", + num_pages * PAGE_MEM_DESC_SIZE_DWORDS); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + offset += qed_dump_section_hdr(dump_buf + offset, - dump, "ilt_page_desc", 1); + continue_dump, "ilt_page_desc", 1); offset += qed_dump_num_param(dump_buf + offset, - dump, + continue_dump, "size", num_pages * PAGE_MEM_DESC_SIZE_DWORDS); - /* Copy memory descriptors to dump buffer */ - if (dump) { - u32 page_id; - + /* Copy memory descriptors to dump buffer + * If need to dump then dump till the dump buffer size + */ + if (continue_dump) { for (page_id = 0; page_id < num_pages; - page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) - memcpy(dump_buf + offset, - &ilt_pages[page_id], - DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS)); + page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) { + if (continue_dump && + (offset + PAGE_MEM_DESC_SIZE_DWORDS <= + buf_size_in_dwords)) { + memcpy(dump_buf + offset, + &ilt_pages[page_id], + DWORDS_TO_BYTES + (PAGE_MEM_DESC_SIZE_DWORDS)); + } else { + if (continue_dump) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + } } else { offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS; } @@ -4798,25 +5159,31 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, num_cids_per_page); /* Dump ILT pages IDs */ - offset += qed_ilt_dump_pages_section(p_hwfn, - dump_buf + offset, - dump, - valid_conn_pf_pages, - valid_conn_vf_pages, - ilt_pages, true); + qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump, + valid_conn_pf_pages, valid_conn_vf_pages, + ilt_pages, true, buf_size_in_dwords, + &actual_dump_size_in_dwords); /* Dump ILT pages memory */ - offset += qed_ilt_dump_pages_section(p_hwfn, - dump_buf + offset, - dump, - valid_conn_pf_pages, - valid_conn_vf_pages, - ilt_pages, false); + qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump, + valid_conn_pf_pages, valid_conn_vf_pages, + ilt_pages, false, buf_size_in_dwords, + &actual_dump_size_in_dwords); + + real_dumped_size = + (continue_dump == dump) ? 
offset : actual_dump_size_in_dwords; + qed_dump_num_param(dump_buf + full_dump_off, dump, + "full-dump-size", offset + last_section_size); + qed_dump_num_param(dump_buf + actul_dump_off, + dump, + "actual-dump-size", + real_dumped_size + last_section_size); /* Dump last section */ - offset += qed_dump_last_section(dump_buf, offset, dump); + real_dumped_size += qed_dump_last_section(dump_buf, + real_dumped_size, dump); - return offset; + return real_dumped_size; } /***************************** Public Functions *******************************/ @@ -4837,6 +5204,16 @@ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, return DBG_STATUS_OK; } +static enum dbg_status qed_dbg_set_app_ver(u32 ver) +{ + if (ver < TOOLS_VERSION) + return DBG_STATUS_UNSUPPORTED_APP_VERSION; + + s_app_ver = ver; + + return DBG_STATUS_OK; +} + bool qed_read_fw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct fw_info *fw_info) { @@ -4975,6 +5352,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; + /* Doesn't do anything, needed for compile time asserts */ + qed_static_asserts(); + /* GRC Dump */ status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); @@ -5296,7 +5676,7 @@ static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn, if (status != DBG_STATUS_OK) return status; - *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false); + *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false); return DBG_STATUS_OK; } @@ -5307,21 +5687,9 @@ static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { - u32 needed_buf_size_in_dwords; - enum dbg_status status; - - *num_dumped_dwords = 0; - - status = qed_dbg_ilt_get_dump_buf_size(p_hwfn, - p_ptt, - &needed_buf_size_in_dwords); - if (status != DBG_STATUS_OK) - return status; - - if (buf_size_in_dwords < needed_buf_size_in_dwords) - return DBG_STATUS_DUMP_BUF_TOO_SMALL; - - *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true); + *num_dumped_dwords = qed_ilt_dump(p_hwfn, + p_ptt, + dump_buf, buf_size_in_dwords, true); /* Reveret GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); @@ -5724,7 +6092,46 @@ static const char * const s_status_str[] = { "The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input", /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */ - "When triggering on Storm data, the Storm to trigger on must be specified" + "When triggering on Storm data, the Storm to trigger on must be specified", + + /* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */ + "Failed to request MDUMP2 Offsize", + + /* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */ + "Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data", + + /* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */ + "Invalid Signature found at start of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */ + "Invalid Log Size of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */ + "Invalid Log Header of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */ + "Invalid Log Data of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */ + "Could not extract number of ports from regval buf of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */ + "Could not extract MFW (link) status from regval buf of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */ + "Could not 
display linkdump of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */ + "Could not read PHY CFG of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */ + "Could not read PLL Mode of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */ + "Could not read TSCF/TSCE Lane Regs of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */ + "Could not allocate MDUMP2 reg-val internal buffer" }; /* Idle check severity names array */ @@ -5874,6 +6281,10 @@ static char s_temp_buf[MAX_MSG_LEN]; /**************************** Private Functions ******************************/ +static void qed_user_static_asserts(void) +{ +} + static u32 qed_cyclic_add(u32 a, u32 b, u32 size) { return (a + b) % size; @@ -6153,9 +6564,8 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, /* Skip register names until the required reg_id is * reached. */ - for (; reg_id > curr_reg_id; - curr_reg_id++, - parsing_str += strlen(parsing_str) + 1); + for (; reg_id > curr_reg_id; curr_reg_id++) + parsing_str += strlen(parsing_str) + 1; results_offset += sprintf(qed_get_buf_ptr(results_buf, @@ -6208,9 +6618,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, u32 *num_errors, u32 *num_warnings) { + u32 num_section_params = 0, num_rules, num_rules_not_dumped; const char *section_name, *param_name, *param_str_val; u32 *dump_buf_end = dump_buf + num_dumped_dwords; - u32 num_section_params = 0, num_rules; /* Offset in results_buf in bytes */ u32 results_offset = 0; @@ -6234,15 +6644,31 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, num_section_params, results_buf, &results_offset); - /* Read idle_chk section */ + /* Read idle_chk section + * There may be 1 or 2 idle_chk section parameters: + * - 1st is "num_rules" + * - 2nd is "num_rules_not_dumped" (optional) + */ + dump_buf += qed_read_section_hdr(dump_buf, §ion_name, &num_section_params); - if (strcmp(section_name, "idle_chk") || num_section_params != 1) + if (strcmp(section_name, "idle_chk") || + (num_section_params != 2 && num_section_params != 1)) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; dump_buf += qed_read_param(dump_buf, ¶m_name, ¶m_str_val, &num_rules); if (strcmp(param_name, "num_rules")) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + if (num_section_params > 1) { + dump_buf += qed_read_param(dump_buf, + ¶m_name, + ¶m_str_val, + &num_rules_not_dumped); + if (strcmp(param_name, "num_rules_not_dumped")) + return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + } else { + num_rules_not_dumped = 0; + } if (num_rules) { u32 rules_print_size; @@ -6309,6 +6735,13 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset), "\nIdle Check completed successfully\n"); + if (num_rules_not_dumped) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n", + num_rules_not_dumped); + /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; @@ -7160,6 +7593,9 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; + /* Doesn't do anything, needed for compile time asserts */ + qed_user_static_asserts(); + return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf, &parsed_buf_size, true); @@ -7336,7 +7772,7 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, reg_result->block_attn_offset; /* Go over attention status bits */ - for (j = 0; j < num_reg_attn; j++, bit_idx++) { + for (j = 0; j < num_reg_attn; j++) { u16 
attn_idx_val = GET_FIELD(bit_mapping[j].data, DBG_ATTN_BIT_MAPPING_VAL); const char *attn_name, *attn_type_str, *masked_str; @@ -7353,35 +7789,36 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, } /* Check current bit index */ - if (!(reg_result->sts_val & BIT(bit_idx))) - continue; + if (reg_result->sts_val & BIT(bit_idx)) { + /* An attention bit with value=1 was found + * Find attention name + */ + attn_name_offset = + block_attn_name_offsets[attn_idx_val]; + attn_name = attn_name_base + attn_name_offset; + attn_type_str = + (attn_type == + ATTN_TYPE_INTERRUPT ? "Interrupt" : + "Parity"); + masked_str = reg_result->mask_val & + BIT(bit_idx) ? + " [masked]" : ""; + sts_addr = + GET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_STS_ADDRESS); + DP_NOTICE(p_hwfn, + "%s (%s) : %s [address 0x%08x, bit %d]%s\n", + block_name, attn_type_str, attn_name, + sts_addr * 4, bit_idx, masked_str); + } - /* An attention bit with value=1 was found - * Find attention name - */ - attn_name_offset = - block_attn_name_offsets[attn_idx_val]; - attn_name = attn_name_base + attn_name_offset; - attn_type_str = - (attn_type == - ATTN_TYPE_INTERRUPT ? "Interrupt" : - "Parity"); - masked_str = reg_result->mask_val & BIT(bit_idx) ? - " [masked]" : ""; - sts_addr = GET_FIELD(reg_result->data, - DBG_ATTN_REG_RESULT_STS_ADDRESS); - DP_NOTICE(p_hwfn, - "%s (%s) : %s [address 0x%08x, bit %d]%s\n", - block_name, attn_type_str, attn_name, - sts_addr * 4, bit_idx, masked_str); + bit_idx++; } } return DBG_STATUS_OK; } -static DEFINE_MUTEX(qed_dbg_lock); - /* Wrapper for unifying the idle_chk and mcp_trace api */ static enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, @@ -7396,9 +7833,26 @@ qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, &num_warnnings); } +static DEFINE_MUTEX(qed_dbg_lock); + +#define MAX_PHY_RESULT_BUFFER 9000 + +/******************************** Feature Meta data section ******************/ + +#define GRC_NUM_STR_FUNCS 2 +#define IDLE_CHK_NUM_STR_FUNCS 1 +#define MCP_TRACE_NUM_STR_FUNCS 1 +#define REG_FIFO_NUM_STR_FUNCS 1 +#define IGU_FIFO_NUM_STR_FUNCS 1 +#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1 +#define FW_ASSERTS_NUM_STR_FUNCS 1 +#define ILT_NUM_STR_FUNCS 1 +#define PHY_NUM_STR_FUNCS 20 + /* Feature meta data lookup table */ static struct { char *name; + u32 num_funcs; enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *size); enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn, @@ -7411,40 +7865,46 @@ static struct { u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + const struct qed_func_lookup *hsi_func_lookup; } qed_features_lookup[] = { { - "grc", qed_dbg_grc_get_dump_buf_size, - qed_dbg_grc_dump, NULL, NULL}, { - "idle_chk", + "grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size, + qed_dbg_grc_dump, NULL, NULL, NULL}, { + "idle_chk", IDLE_CHK_NUM_STR_FUNCS, qed_dbg_idle_chk_get_dump_buf_size, qed_dbg_idle_chk_dump, qed_print_idle_chk_results_wrapper, - qed_get_idle_chk_results_buf_size}, { - "mcp_trace", + qed_get_idle_chk_results_buf_size, + NULL}, { + "mcp_trace", MCP_TRACE_NUM_STR_FUNCS, qed_dbg_mcp_trace_get_dump_buf_size, qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results, - qed_get_mcp_trace_results_buf_size}, { - "reg_fifo", + qed_get_mcp_trace_results_buf_size, + NULL}, { + "reg_fifo", REG_FIFO_NUM_STR_FUNCS, qed_dbg_reg_fifo_get_dump_buf_size, qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results, - qed_get_reg_fifo_results_buf_size}, { - "igu_fifo", + qed_get_reg_fifo_results_buf_size, + 
NULL}, { + "igu_fifo", IGU_FIFO_NUM_STR_FUNCS, qed_dbg_igu_fifo_get_dump_buf_size, qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results, - qed_get_igu_fifo_results_buf_size}, { - "protection_override", + qed_get_igu_fifo_results_buf_size, + NULL}, { + "protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS, qed_dbg_protection_override_get_dump_buf_size, qed_dbg_protection_override_dump, qed_print_protection_override_results, - qed_get_protection_override_results_buf_size}, { - "fw_asserts", + qed_get_protection_override_results_buf_size, + NULL}, { + "fw_asserts", FW_ASSERTS_NUM_STR_FUNCS, qed_dbg_fw_asserts_get_dump_buf_size, qed_dbg_fw_asserts_dump, qed_print_fw_asserts_results, - qed_get_fw_asserts_results_buf_size}, { - "ilt", - qed_dbg_ilt_get_dump_buf_size, - qed_dbg_ilt_dump, NULL, NULL},}; + qed_get_fw_asserts_results_buf_size, + NULL}, { + "ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size, + qed_dbg_ilt_dump, NULL, NULL, NULL},}; static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size) { @@ -7466,7 +7926,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, { struct qed_dbg_feature *feature = &p_hwfn->cdev->dbg_features[feature_idx]; - u32 text_size_bytes, null_char_pos, i; + u32 txt_size_bytes, null_char_pos, i; + u32 *dbuf, dwords; enum dbg_status rc; char *text_buf; @@ -7474,33 +7935,43 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, if (!qed_features_lookup[feature_idx].results_buf_size) return DBG_STATUS_OK; + dbuf = (u32 *)feature->dump_buf; + dwords = feature->dumped_dwords; + /* Obtain size of formatted output */ - rc = qed_features_lookup[feature_idx]. - results_buf_size(p_hwfn, (u32 *)feature->dump_buf, - feature->dumped_dwords, &text_size_bytes); + rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn, + dbuf, + dwords, + &txt_size_bytes); if (rc != DBG_STATUS_OK) return rc; - /* Make sure that the allocated size is a multiple of dword (4 bytes) */ - null_char_pos = text_size_bytes - 1; - text_size_bytes = (text_size_bytes + 3) & ~0x3; + /* Make sure that the allocated size is a multiple of dword + * (4 bytes). + */ + null_char_pos = txt_size_bytes - 1; + txt_size_bytes = (txt_size_bytes + 3) & ~0x3; - if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) { + if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) { DP_NOTICE(p_hwfn->cdev, "formatted size of feature was too small %d. Aborting\n", - text_size_bytes); + txt_size_bytes); return DBG_STATUS_INVALID_ARGS; } - /* Allocate temp text buf */ - text_buf = vzalloc(text_size_bytes); - if (!text_buf) + /* allocate temp text buf */ + text_buf = vzalloc(txt_size_bytes); + if (!text_buf) { + DP_NOTICE(p_hwfn->cdev, + "failed to allocate text buffer. Aborting\n"); return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + } /* Decode feature opcodes to string on temp buf */ - rc = qed_features_lookup[feature_idx]. - print_results(p_hwfn, (u32 *)feature->dump_buf, - feature->dumped_dwords, text_buf); + rc = qed_features_lookup[feature_idx].print_results(p_hwfn, + dbuf, + dwords, + text_buf); if (rc != DBG_STATUS_OK) { vfree(text_buf); return rc; @@ -7510,26 +7981,27 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, * The bytes that were added as a result of the dword alignment are also * padded with '\n' characters. 
*/ - for (i = null_char_pos; i < text_size_bytes; i++) + for (i = null_char_pos; i < txt_size_bytes; i++) text_buf[i] = '\n'; /* Dump printable feature to log */ if (p_hwfn->cdev->print_dbg_data) - qed_dbg_print_feature(text_buf, text_size_bytes); + qed_dbg_print_feature(text_buf, txt_size_bytes); - /* Just return the original binary buffer if requested */ + /* Dump binary data as is to the output file */ if (p_hwfn->cdev->dbg_bin_dump) { vfree(text_buf); - return DBG_STATUS_OK; + return rc; } - /* Free the old dump_buf and point the dump_buf to the newly allocagted + /* Free the old dump_buf and point the dump_buf to the newly allocated * and formatted text buffer. */ vfree(feature->dump_buf); feature->dump_buf = text_buf; - feature->buf_size = text_size_bytes; - feature->dumped_dwords = text_size_bytes / 4; + feature->buf_size = txt_size_bytes; + feature->dumped_dwords = txt_size_bytes / 4; + return rc; } @@ -7542,7 +8014,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, { struct qed_dbg_feature *feature = &p_hwfn->cdev->dbg_features[feature_idx]; - u32 buf_size_dwords; + u32 buf_size_dwords, *dbuf, *dwords; enum dbg_status rc; DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n", @@ -7580,13 +8052,16 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, if (!feature->dump_buf) return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; - rc = qed_features_lookup[feature_idx]. - perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf, - feature->buf_size / sizeof(u32), - &feature->dumped_dwords); + dbuf = (u32 *)feature->dump_buf; + dwords = &feature->dumped_dwords; + rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt, + dbuf, + feature->buf_size / + sizeof(u32), + dwords); /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error. - * In this case the buffer holds valid binary data, but we wont able + * In this case the buffer holds valid binary data, but we won't be able * to parse it (since parsing relies on data in NVRAM which is only * accessible when MFW is responsive). skip the formatting but return * success so that binary data is provided.
@@ -7777,7 +8252,8 @@ enum debug_print_features { static u32 qed_calc_regdump_header(struct qed_dev *cdev, enum debug_print_features feature, - int engine, u32 feature_size, u8 omit_engine) + int engine, u32 feature_size, + u8 omit_engine, u8 dbg_bin_dump) { u32 res = 0; @@ -7788,7 +8264,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev, feature, feature_size); SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature); - SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1); + SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump); SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine); SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine); @@ -7798,12 +8274,10 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev, int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) { u8 cur_engine, omit_engine = 0, org_engine; - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - int grc_params[MAX_DBG_GRC_PARAMS], i; + int grc_params[MAX_DBG_GRC_PARAMS], rc, i; u32 offset = 0, feature_size; - int rc; for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) grc_params[i] = dev_data->grc.param_val[i]; @@ -7811,8 +8285,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!QED_IS_CMT(cdev)) omit_engine = 1; + cdev->dbg_bin_dump = 1; mutex_lock(&qed_dbg_lock); - cdev->dbg_bin_dump = true; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { @@ -7826,8 +8300,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, IDLE_CHK, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); @@ -7838,8 +8315,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, IDLE_CHK, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); @@ -7850,8 +8330,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, REG_FIFO, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, REG_FIFO, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc); @@ -7862,8 +8345,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, IGU_FIFO, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_igu_fifo failed. 
rc = %d", rc); @@ -7875,9 +8361,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE, + qed_calc_regdump_header(cdev, + PROTECTION_OVERRIDE, cur_engine, - feature_size, omit_engine); + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, @@ -7891,8 +8380,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, FW_ASSERTS, - cur_engine, feature_size, - omit_engine); + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n", @@ -7900,8 +8391,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) } feature_size = qed_dbg_ilt_size(cdev); - if (!cdev->disable_ilt_dump && - feature_size < ILT_DUMP_MAX_SIZE) { + if (!cdev->disable_ilt_dump && feature_size < + ILT_DUMP_MAX_SIZE) { rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { @@ -7909,15 +8400,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) qed_calc_regdump_header(cdev, ILT_DUMP, cur_engine, feature_size, - omit_engine); - offset += feature_size + REGDUMP_HEADER_SIZE; + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n", rc); } } - /* GRC dump - must be last because when mcp stuck it will + /* Grc dump - must be last because when mcp stuck it will * clutter idle_chk, reg_fifo, ... */ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) @@ -7929,7 +8421,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, GRC_DUMP, cur_engine, - feature_size, omit_engine); + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc); @@ -7944,16 +8438,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine, - feature_size, omit_engine); + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_mcp_trace failed. 
rc = %d\n", rc); } - /* Re-populate nvm attribute info */ - qed_mcp_nvm_info_free(p_hwfn); - qed_mcp_nvm_info_populate(p_hwfn); - /* nvm cfg1 */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + @@ -7962,43 +8453,51 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine, - feature_size, omit_engine); + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", - QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc); + QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", + rc); } - /* nvm default */ + /* nvm default */ rc = qed_dbg_nvm_image(cdev, - (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, - &feature_size, QED_NVM_IMAGE_DEFAULT_CFG); + (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_DEFAULT_CFG); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, DEFAULT_CFG, + cur_engine, feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", - QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG", - rc); + QED_NVM_IMAGE_DEFAULT_CFG, + "QED_NVM_IMAGE_DEFAULT_CFG", rc); } /* nvm meta */ rc = qed_dbg_nvm_image(cdev, - (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, - &feature_size, QED_NVM_IMAGE_NVM_META); + (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_NVM_META); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, NVM_META, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, NVM_META, cur_engine, + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", - QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc); + QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", + rc); } /* nvm mdump */ @@ -8007,8 +8506,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) QED_NVM_IMAGE_MDUMP); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, MDUMP, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, MDUMP, cur_engine, + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, @@ -8016,17 +8516,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc); } - cdev->dbg_bin_dump = false; mutex_unlock(&qed_dbg_lock); + cdev->dbg_bin_dump = 0; return 0; } int qed_dbg_all_data_size(struct qed_dev *cdev) { - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; u8 cur_engine, org_engine; cdev->disable_ilt_dump = false; @@ -8037,14 +8536,13 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) "calculating idle_chk and grcdump register length for current engine\n"); qed_set_debug_engine(cdev, cur_engine); regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) + - 
REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) + - REGDUMP_HEADER_SIZE + - qed_dbg_protection_override_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); - + REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) + + REGDUMP_HEADER_SIZE + + qed_dbg_protection_override_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev); if (ilt_len < ILT_DUMP_MAX_SIZE) { total_ilt_len += ilt_len; @@ -8055,7 +8553,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) qed_set_debug_engine(cdev, org_engine); /* Engine common */ - regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev); + regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev); qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len); if (image_len) regs_len += REGDUMP_HEADER_SIZE + image_len; @@ -8083,10 +8582,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) int qed_dbg_feature(struct qed_dev *cdev, void *buffer, enum qed_dbg_features feature, u32 *num_dumped_bytes) { - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; - struct qed_dbg_feature *qed_feature = - &cdev->dbg_features[feature]; + struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; enum dbg_status dbg_rc; struct qed_ptt *p_ptt; int rc = 0; @@ -8119,9 +8616,8 @@ out: int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) { - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); u32 buf_size_dwords; enum dbg_status rc; @@ -8143,6 +8639,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) return qed_feature->buf_size; } +int qed_dbg_phy_size(struct qed_dev *cdev) +{ + /* return max size of phy info and + * phy mac_stat multiplied by the number of ports + */ + return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev)); +} + u8 qed_get_debug_engine(struct qed_dev *cdev) { return cdev->engine_for_debug; @@ -8160,6 +8664,9 @@ void qed_dbg_pf_init(struct qed_dev *cdev) const u8 *dbg_values = NULL; int i; + /* Sync ver with debugbus qed code */ + qed_dbg_set_app_ver(TOOLS_VERSION); + /* Debug values are after init values. * The offset is the first dword of the file. */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h index e71af82d3200..b0d4b937cf4a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.h +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. 
*/ -#ifndef _QED_DEBUGFS_H -#define _QED_DEBUGFS_H +#ifndef _QED_DEBUG_H +#define _QED_DEBUG_H enum qed_dbg_features { DBG_FEATURE_GRC, @@ -45,6 +45,7 @@ int qed_dbg_ilt_size(struct qed_dev *cdev); int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); int qed_dbg_mcp_trace_size(struct qed_dev *cdev); +int qed_dbg_phy_size(struct qed_dev *cdev); int qed_dbg_all_data(struct qed_dev *cdev, void *buffer); int qed_dbg_all_data_size(struct qed_dev *cdev); u8 qed_get_debug_engine(struct qed_dev *cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 0410c3604abd..18f3bf7c4dfe 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -25,6 +25,7 @@ #include "qed_dev_api.h" #include "qed_fcoe.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" @@ -951,7 +952,7 @@ qed_llh_remove_filter(struct qed_hwfn *p_hwfn, } int qed_llh_add_mac_filter(struct qed_dev *cdev, - u8 ppfid, u8 mac_addr[ETH_ALEN]) + u8 ppfid, const u8 mac_addr[ETH_ALEN]) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); @@ -1396,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev) qed_rdma_info_free(p_hwfn); } + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); qed_dbg_user_data_free(p_hwfn); - qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem); + qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem); /* Destroy doorbell recovery mechanism */ qed_db_recovery_teardown(p_hwfn); @@ -1483,8 +1485,8 @@ static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); /* num RLs can't exceed resource amount of rls or vports */ - num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), - RESC_NUM(p_hwfn, QED_VPORT)); + num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL), + RESC_NUM(p_hwfn, QED_VPORT)); /* Make sure after we reserve there's something left */ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) @@ -1532,8 +1534,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn) bool four_port; /* pq and vport bases for this PF */ - qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); - qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); + qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); + qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); /* rate limiting and weighted fair queueing are always enabled */ qm_info->vport_rl_en = true; @@ -1628,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) */ /* flags for pq init */ -#define PQ_INIT_SHARE_VPORT (1 << 0) -#define PQ_INIT_PF_RL (1 << 1) -#define PQ_INIT_VF_RL (1 << 2) +#define PQ_INIT_SHARE_VPORT BIT(0) +#define PQ_INIT_PF_RL BIT(1) +#define PQ_INIT_VF_RL BIT(2) /* defines for pq init */ #define PQ_INIT_DEFAULT_WRR_GROUP 1 @@ -2290,7 +2292,7 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_no_mem; } - rc = qed_eq_alloc(p_hwfn, (u16) n_eqes); + rc = qed_eq_alloc(p_hwfn, (u16)n_eqes); if (rc) goto alloc_err; @@ -2375,6 +2377,49 @@ alloc_err: return rc; } +static int qed_fw_err_handler(struct qed_hwfn *p_hwfn, + u8 opcode, + u16 echo, + union event_ring_data *data, u8 fw_return_code) +{ + if (fw_return_code != COMMON_ERR_CODE_ERROR) + goto eqe_unexpected; + + if (data->err_data.recovery_scope == ERR_SCOPE_FUNC && + 
le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) { + qed_sriov_vfpf_malicious(p_hwfn, &data->err_data); + return 0; + } + +eqe_unexpected: + DP_ERR(p_hwfn, + "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n", + opcode, fw_return_code, echo); + return -EINVAL; +} + +static int qed_common_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, + u8 fw_return_code) +{ + switch (opcode) { + case COMMON_EVENT_VF_PF_CHANNEL: + case COMMON_EVENT_VF_FLR: + return qed_sriov_eqe_event(p_hwfn, opcode, echo, data, + fw_return_code); + case COMMON_EVENT_FW_ERROR: + return qed_fw_err_handler(p_hwfn, opcode, + le16_to_cpu(echo), data, + fw_return_code); + default: + DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n", + opcode, echo); + return -EINVAL; + } +} + void qed_resc_setup(struct qed_dev *cdev) { int i; @@ -2403,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev) qed_l2_setup(p_hwfn); qed_iov_setup(p_hwfn); + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, + qed_common_eqe_event); #ifdef CONFIG_QED_LL2 if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn); @@ -2430,9 +2477,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; - addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); - + addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id); if (is_vf) id += 0x10; @@ -2592,7 +2638,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, cache_line_size); } - if (L1_CACHE_BYTES > wr_mbs) + if (wr_mbs < L1_CACHE_BYTES) DP_INFO(p_hwfn, "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", L1_CACHE_BYTES, wr_mbs); @@ -2608,13 +2654,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int hw_mode) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; - struct qed_qm_common_rt_init_params params; + struct qed_qm_common_rt_init_params *params; struct qed_dev *cdev = p_hwfn->cdev; u8 vf_id, max_num_vfs; u16 num_pfs, pf_id; u32 concrete_fid; int rc = 0; + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) { + DP_NOTICE(p_hwfn->cdev, + "Failed to allocate common init params\n"); + + return -ENOMEM; + } + qed_init_cau_rt_data(cdev); /* Program GTT windows */ @@ -2627,16 +2681,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qm_info->pf_wfq_en = true; } - memset(&params, 0, sizeof(params)); - params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; - params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; - params.pf_rl_en = qm_info->pf_rl_en; - params.pf_wfq_en = qm_info->pf_wfq_en; - params.global_rl_en = qm_info->vport_rl_en; - params.vport_wfq_en = qm_info->vport_wfq_en; - params.port_params = qm_info->qm_port_params; + params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; + params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; + params->pf_rl_en = qm_info->pf_rl_en; + params->pf_wfq_en = qm_info->pf_wfq_en; + params->global_rl_en = qm_info->vport_rl_en; + params->vport_wfq_en = qm_info->vport_wfq_en; + params->port_params = qm_info->qm_port_params; - qed_qm_common_rt_init(p_hwfn, &params); + qed_qm_common_rt_init(p_hwfn, params); qed_cxt_hw_init_common(p_hwfn); @@ -2644,7 +2697,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); if (rc) - return rc; + goto out; qed_wr(p_hwfn, p_ptt, 
PSWRQ2_REG_L2P_VALIDATE_VFID, 0); qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); @@ -2663,7 +2716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); - qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); @@ -2672,6 +2725,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, /* pretend to original PF */ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); +out: + kfree(params); + return rc; } @@ -2784,7 +2840,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_rdma_dpm_bar(p_hwfn, p_ptt); } - p_hwfn->wid_count = (u16) n_cpus; + p_hwfn->wid_count = (u16)n_cpus; DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", @@ -3503,8 +3559,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) static void get_function_id(struct qed_hwfn *p_hwfn) { /* ME Register */ - p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, - PXP_PF_ME_OPAQUE_ADDR); + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, + PXP_PF_ME_OPAQUE_ADDR); p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); @@ -3670,12 +3726,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type) return qed_hsi_def_val[type][chip_id]; } + static int qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resc_max_val, mcp_resp; u8 res_id; int rc; + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { switch (res_id) { case QED_LL2_RAM_QUEUE: @@ -3921,7 +3979,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) * resources allocation queries should be atomic. Since several PFs can * run in parallel - a resource lock is needed. * If either the resource lock or resource set value commands are not - * supported - skip the the max values setting, release the lock if + * supported - skip the max values setting, release the lock if * needed, and proceed to the queries. Other failures, including a * failure to acquire the lock, will cause this function to fail. 
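The qed_common_eqe_event() handler registered above with qed_spq_register_async_cb() for PROTOCOLID_COMMON is a thin dispatcher: VF/PF-channel and VF-FLR events are forwarded to the SR-IOV handler, COMMON_EVENT_FW_ERROR goes to qed_fw_err_handler(), and anything else is logged and rejected. A stand-alone sketch of that dispatch shape (the opcode names, values, and handlers below are placeholders, not the driver's HSI definitions):

#include <stdio.h>

/* Placeholder opcodes; the real values come from the qed HSI headers. */
enum evt_opcode {
	EVT_VF_PF_CHANNEL,
	EVT_VF_FLR,
	EVT_FW_ERROR,
	EVT_BOGUS,
};

static int handle_sriov_event(enum evt_opcode op)
{
	printf("sriov event %d\n", op);
	return 0;
}

static int handle_fw_error(void)
{
	printf("firmware error event\n");
	return 0;
}

/* Dispatch one event-queue entry to the matching handler. */
static int common_eqe_event(enum evt_opcode op)
{
	switch (op) {
	case EVT_VF_PF_CHANNEL:
	case EVT_VF_FLR:
		return handle_sriov_event(op);
	case EVT_FW_ERROR:
		return handle_fw_error();
	default:
		fprintf(stderr, "unknown eqe event %d\n", op);
		return -1;	/* the driver returns -EINVAL here */
	}
}

int main(void)
{
	common_eqe_event(EVT_VF_FLR);
	common_eqe_event(EVT_FW_ERROR);
	common_eqe_event(EVT_BOGUS);	/* logged and rejected */
	return 0;
}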
*/ @@ -4775,7 +4833,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { u16 min, max; - min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); + min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); DP_NOTICE(p_hwfn, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", @@ -4909,7 +4967,7 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, goto out; address = BAR0_MAP_REG_USDM_RAM + - USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, sizeof(struct ustorm_eth_queue_zone), timeset); @@ -4948,7 +5006,7 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, goto out; address = BAR0_MAP_REG_XSDM_RAM + - XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, sizeof(struct xstorm_eth_queue_zone), timeset); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index d3c1f3879be8..f8682356d0cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -15,44 +15,52 @@ #include "qed_int.h" /** - * @brief qed_init_dp - initialize the debug level + * qed_init_dp(): Initialize the debug level. * - * @param cdev - * @param dp_module - * @param dp_level + * @cdev: Qed dev pointer. + * @dp_module: Module debug parameter. + * @dp_level: Module debug level. + * + * Return: Void. */ void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level); /** - * @brief qed_init_struct - initialize the device structure to - * its defaults + * qed_init_struct(): Initialize the device structure to + * its defaults. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_init_struct(struct qed_dev *cdev); /** - * @brief qed_resc_free - + * qed_resc_free: Free device resources. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_resc_free(struct qed_dev *cdev); /** - * @brief qed_resc_alloc - + * qed_resc_alloc(): Alloc device resources. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_resc_alloc(struct qed_dev *cdev); /** - * @brief qed_resc_setup - + * qed_resc_setup(): Setup device resources. * - * @param cdev + * @cdev: Qed dev pointer. + * + * Return: Void. */ void qed_resc_setup(struct qed_dev *cdev); @@ -105,94 +113,96 @@ struct qed_hw_init_params { }; /** - * @brief qed_hw_init - + * qed_hw_init(): Init Qed hardware. * - * @param cdev - * @param p_params + * @cdev: Qed dev pointer. + * @p_params: Pointers to params. * - * @return int + * Return: Int. */ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params); /** - * @brief qed_hw_timers_stop_all - stop the timers HW block + * qed_hw_timers_stop_all(): Stop the timers HW block. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return void + * Return: void. */ void qed_hw_timers_stop_all(struct qed_dev *cdev); /** - * @brief qed_hw_stop - + * qed_hw_stop(): Stop Qed hardware. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: int. */ int qed_hw_stop(struct qed_dev *cdev); /** - * @brief qed_hw_stop_fastpath -should be called incase - * slowpath is still required for the device, - * but fastpath is not. 
+ * qed_hw_stop_fastpath(): Should be called incase + * slowpath is still required for the device, + * but fastpath is not. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_hw_stop_fastpath(struct qed_dev *cdev); /** - * @brief qed_hw_start_fastpath -restart fastpath traffic, - * only if hw_stop_fastpath was called + * qed_hw_start_fastpath(): Restart fastpath traffic, + * only if hw_stop_fastpath was called. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn); - /** - * @brief qed_hw_prepare - + * qed_hw_prepare(): Prepare Qed hardware. * - * @param cdev - * @param personality - personality to initialize + * @cdev: Qed dev pointer. + * @personality: Personality to initialize. * - * @return int + * Return: Int. */ int qed_hw_prepare(struct qed_dev *cdev, int personality); /** - * @brief qed_hw_remove - + * qed_hw_remove(): Remove Qed hardware. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_hw_remove(struct qed_dev *cdev); /** - * @brief qed_ptt_acquire - Allocate a PTT window + * qed_ptt_acquire(): Allocate a PTT window. * - * Should be called at the entry point to the driver (at the beginning of an - * exported function) + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: struct qed_ptt. * - * @return struct qed_ptt + * Should be called at the entry point to the driver (at the beginning of an + * exported function). */ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_release - Release PTT Window + * qed_ptt_release(): Release PTT Window. * - * Should be called at the end of a flow - at the end of the function that - * acquired the PTT. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * + * Return: Void. * - * @param p_hwfn - * @param p_ptt + * Should be called at the end of a flow - at the end of the function that + * acquired the PTT. */ void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -205,15 +215,17 @@ enum qed_dmae_address_type_t { }; /** - * @brief qed_dmae_host2grc - copy data from source addr to - * dmae registers using the given ptt + * qed_dmae_host2grc(): Copy data from source addr to + * dmae registers using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @grc_addr: GRC address (dmae_data_offset). + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). * - * @param p_hwfn - * @param p_ptt - * @param source_addr - * @param grc_addr (dmae_data_offset) - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * Return: Int. */ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, @@ -224,29 +236,34 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn, struct qed_dmae_params *p_params); /** - * @brief qed_dmae_grc2host - Read data from dmae data offset - * to source address using the given ptt + * qed_dmae_grc2host(): Read data from dmae data offset + * to source address using the given ptt. + * + * @p_ptt: P_ptt. + * @grc_addr: GRC address (dmae_data_offset). + * @dest_addr: Destination Address. + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). * - * @param p_ptt - * @param grc_addr (dmae_data_offset) - * @param dest_addr - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * Return: Int. 
*/ int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords, struct qed_dmae_params *p_params); /** - * @brief qed_dmae_host2host - copy data from to source address - * to a destination adress (for SRIOV) using the given ptt + * qed_dmae_host2host(): Copy data from to source address + * to a destination adrress (for SRIOV) using the given + * ptt. * - * @param p_hwfn - * @param p_ptt - * @param source_addr - * @param dest_addr - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @dest_addr: Destination address. + * @size_in_dwords: size. + * @p_params: (default parameters will be used in case of NULL). + * + * Return: Int. */ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -259,51 +276,51 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain); /** - * @@brief qed_fw_l2_queue - Get absolute L2 queue ID + * qed_fw_l2_queue(): Get absolute L2 queue ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id); /** - * @@brief qed_fw_vport - Get absolute vport ID + * qed_fw_vport(): Get absolute vport ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id); /** - * @@brief qed_fw_rss_eng - Get absolute RSS engine ID + * qed_fw_rss_eng(): Get absolute RSS engine ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id); /** - * @brief qed_llh_get_num_ppfid - Return the allocated number of LLH filter - * banks that are allocated to the PF. + * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter + * banks that are allocated to the PF. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return u8 - Number of LLH filter banks + * Return: u8 Number of LLH filter banks. */ u8 qed_llh_get_num_ppfid(struct qed_dev *cdev); @@ -314,45 +331,50 @@ enum qed_eng { }; /** - * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given - * LLH filter bank. + * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given + * LLH filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param eng + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @eng: Engine. * - * @return int + * Return: Int. */ int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng); /** - * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity + * qed_llh_set_roce_affinity(): Set the RoCE engine affinity. * - * @param cdev - * @param eng + * @cdev: Qed dev pointer. + * @eng: Engine. * - * @return int + * Return: Int. 
*/ int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng); /** - * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter - * bank. + * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter + * bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @mac_addr: MAC to add. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param mac_addr - MAC to add + * Return: Int. */ int qed_llh_add_mac_filter(struct qed_dev *cdev, - u8 ppfid, u8 mac_addr[ETH_ALEN]); + u8 ppfid, const u8 mac_addr[ETH_ALEN]); /** - * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given - * filter bank. + * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given + * filter bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Ppfid. + * @mac_addr: MAC to remove * - * @param p_ptt - * @param p_filter - MAC to remove + * Return: Void. */ void qed_llh_remove_mac_filter(struct qed_dev *cdev, u8 ppfid, u8 mac_addr[ETH_ALEN]); @@ -368,15 +390,16 @@ enum qed_llh_prot_filter_type_t { }; /** - * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the - * given filter bank. + * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the + * given filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param type - type of filters and comparing - * @param source_port_or_eth_type - source port or ethertype to add - * @param dest_port - destination port to add - * @param type - type of filters and comparing + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. + * + * Return: Int. */ int qed_llh_add_protocol_filter(struct qed_dev *cdev, @@ -385,14 +408,14 @@ qed_llh_add_protocol_filter(struct qed_dev *cdev, u16 source_port_or_eth_type, u16 dest_port); /** - * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from - * the given filter bank. + * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from + * the given filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param type - type of filters and comparing - * @param source_port_or_eth_type - source port or ethertype to add - * @param dest_port - destination port to add + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. */ void qed_llh_remove_protocol_filter(struct qed_dev *cdev, @@ -401,31 +424,31 @@ qed_llh_remove_protocol_filter(struct qed_dev *cdev, u16 source_port_or_eth_type, u16 dest_port); /** - * *@brief Cleanup of previous driver remains prior to load + * qed_final_cleanup(): Cleanup of previous driver remains prior to load. * - * @param p_hwfn - * @param p_ptt - * @param id - For PF, engine-relative. For VF, PF-relative. - * @param is_vf - true iff cleanup is made for a VF. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @id: For PF, engine-relative. For VF, PF-relative. + * @is_vf: True iff cleanup is made for a VF. * - * @return int + * Return: Int. 
*/ int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); /** - * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue. + * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue. * - * @param p_hwfn - * @param p_coal - store coalesce value read from the hardware. - * @param p_handle + * @p_hwfn: HW device data. + * @coal: Store coalesce value read from the hardware. + * @handle: P_handle. * - * @return int + * Return: Int. **/ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); /** - * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and + * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and * Tx queue. The fact that we can configure coalescing to up to 511, but on * varying accuracy [the bigger the value the less accurate] up to a mistake * of 3usec for the highest values. @@ -433,37 +456,38 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] * otherwise configuration would break. * + * @rx_coal: Rx Coalesce value in micro seconds. + * @tx_coal: TX Coalesce value in micro seconds. + * @p_handle: P_handle. * - * @param rx_coal - Rx Coalesce value in micro seconds. - * @param tx_coal - TX Coalesce value in micro seconds. - * @param p_handle - * - * @return int + * Return: Int. **/ int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); /** - * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER + * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER. * - * @param p_hwfn - * @param p_ptt - * @param b_enable - true/false + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_enable: True/False. * - * @return int + * Return: Int. */ int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_enable); /** - * @brief db_recovery_add - add doorbell information to the doorbell - * recovery mechanism. + * qed_db_recovery_add(): add doorbell information to the doorbell + * recovery mechanism. + * + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Address of where db_data is stored. + * @db_width: Doorbell is 32b pr 64b. + * @db_space: Doorbell recovery addresses are user or kernel space. * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address of where db_data is stored - * @param db_width - doorbell is 32b pr 64b - * @param db_space - doorbell recovery addresses are user or kernel space + * Return: Int. */ int qed_db_recovery_add(struct qed_dev *cdev, void __iomem *db_addr, @@ -472,17 +496,18 @@ int qed_db_recovery_add(struct qed_dev *cdev, enum qed_db_rec_space db_space); /** - * @brief db_recovery_del - remove doorbell information from the doorbell + * qed_db_recovery_del() - remove doorbell information from the doorbell * recovery mechanism. db_data serves as key (db_addr is not unique). * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address where db_data is stored. Serves as key for the + * @cdev: Qed dev pointer. + * @db_addr: doorbell address. + * @db_data: address where db_data is stored. Serves as key for the * entry to delete. + * + * Return: Int. 
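The qed_set_queue_coalesce() comment above caps coalescing at 511 microseconds, notes that accuracy degrades toward the top of the range (up to about 3 usec of error for the highest values), and requires the values being configured to sit in the same range bucket (0-0x7f, 0x80-0xff, or 0x100-0x1ff) or the configuration breaks. A small stand-alone check of that constraint, assuming exactly the three buckets the comment lists:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Map a coalesce value (in microseconds) to its range bucket, -1 if out of range. */
static int coal_bucket(uint16_t coal)
{
	if (coal <= 0x7f)
		return 0;
	if (coal <= 0xff)
		return 1;
	if (coal <= 0x1ff)
		return 2;
	return -1;		/* above the documented 511 (0x1ff) maximum */
}

/* Both coalesce values being configured must land in the same bucket. */
static bool coal_pair_valid(uint16_t rx_coal, uint16_t tx_coal)
{
	int rx = coal_bucket(rx_coal);
	int tx = coal_bucket(tx_coal);

	return rx >= 0 && rx == tx;
}

int main(void)
{
	printf("%d %d %d\n",
	       coal_pair_valid(100, 120),	/* both in 0x00-0x7f  -> 1 */
	       coal_pair_valid(100, 200),	/* different buckets  -> 0 */
	       coal_pair_valid(600, 600));	/* above 0x1ff max    -> 0 */
	return 0;
}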
*/ int qed_db_recovery_del(struct qed_dev *cdev, void __iomem *db_addr, void *db_data); - const char *qed_hw_get_resc_name(enum qed_resources res_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c index 78070682f2df..6bb4e165b592 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -215,10 +215,6 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev) qdevlink = devlink_priv(dl); qdevlink->cdev = cdev; - rc = devlink_register(dl); - if (rc) - goto err_free; - rc = devlink_params_register(dl, qed_devlink_params, ARRAY_SIZE(qed_devlink_params)); if (rc) @@ -229,17 +225,13 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev) QED_DEVLINK_PARAM_ID_IWARP_CMT, value); - devlink_params_publish(dl); cdev->iwarp_cmt = false; qed_fw_reporters_create(dl); - + devlink_register(dl); return dl; err_unregister: - devlink_unregister(dl); - -err_free: devlink_free(dl); return ERR_PTR(rc); @@ -250,11 +242,11 @@ void qed_devlink_unregister(struct devlink *devlink) if (!devlink) return; + devlink_unregister(devlink); qed_fw_reporters_destroy(devlink); devlink_params_unregister(devlink, qed_devlink_params, ARRAY_SIZE(qed_devlink_params)); - devlink_unregister(devlink); devlink_free(devlink); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index b768f0698170..3764190b948e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -30,6 +30,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_iro_hsi.h" #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" @@ -89,7 +90,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, struct qed_fcoe_pf_params *fcoe_pf_params = NULL; struct fcoe_init_ramrod_params *p_ramrod = NULL; struct fcoe_init_func_ramrod_data *p_data; - struct e4_fcoe_conn_context *p_cxt = NULL; + struct fcoe_conn_context *p_cxt = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_cxt_info cxt_info; @@ -144,7 +145,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, memset(p_cxt, 0, sizeof(*p_cxt)); SET_FIELD(p_cxt->tstorm_ag_context.flags3, - E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); + TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); fcoe_pf_params->dummy_icid = (u16)dummy_cid; @@ -506,10 +507,9 @@ static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; @@ -521,10 +521,9 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_TSDM_RAM + - TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; @@ -549,7 +548,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) void qed_fcoe_setup(struct qed_hwfn *p_hwfn) { - struct e4_fcoe_task_context *p_task_ctx = NULL; + struct fcoe_task_context 
*p_task_ctx = NULL; u32 i, lc; int rc; @@ -561,7 +560,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn) if (rc) continue; - memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context)); + memset(p_task_ctx, 0, sizeof(struct fcoe_task_context)); lc = 0; SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1); @@ -572,7 +571,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn) p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc); SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, - E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); + TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index fb1baa2da2d0..f2cedbd9489c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. */ #ifndef _QED_HSI_H @@ -38,7 +38,7 @@ enum common_event_opcode { COMMON_EVENT_VF_PF_CHANNEL, COMMON_EVENT_VF_FLR, COMMON_EVENT_PF_UPDATE, - COMMON_EVENT_MALICIOUS_VF, + COMMON_EVENT_FW_ERROR, COMMON_EVENT_RL_UPDATE, COMMON_EVENT_EMPTY, MAX_COMMON_EVENT_OPCODE @@ -84,6 +84,13 @@ enum core_l4_pseudo_checksum_mode { MAX_CORE_L4_PSEUDO_CHECKSUM_MODE }; +/* LL2 SP error code */ +enum core_ll2_error_code { + LL2_OK = 0, + LL2_ERROR, + MAX_CORE_LL2_ERROR_CODE +}; + /* Light-L2 RX Producers in Tstorm RAM */ struct core_ll2_port_stats { struct regpair gsi_invalid_hdr; @@ -123,6 +130,15 @@ struct core_ll2_ustorm_per_queue_stat { struct regpair rcv_bcast_pkts; }; +struct core_ll2_rx_per_queue_stat { + struct core_ll2_tstorm_per_queue_stat tstorm_stat; + struct core_ll2_ustorm_per_queue_stat ustorm_stat; +}; + +struct core_ll2_tx_per_queue_stat { + struct core_ll2_pstorm_per_queue_stat pstorm_stat; +}; + /* Structure for doorbell data, in PWM mode, for RX producers update. */ struct core_pwm_prod_update_data { __le16 icid; /* internal CID */ @@ -135,6 +151,15 @@ struct core_pwm_prod_update_data { struct core_ll2_rx_prod prod; /* Producers */ }; +/* Ramrod data for rx/tx queue statistics query ramrod */ +struct core_queue_stats_query_ramrod_data { + u8 rx_stat; + u8 tx_stat; + __le16 reserved[3]; + struct regpair rx_stat_addr; + struct regpair tx_stat_addr; +}; + /* Core Ramrod Command IDs (light L2) */ enum core_ramrod_cmd_id { CORE_RAMROD_UNUSED, @@ -210,7 +235,8 @@ struct core_rx_fast_path_cqe { __le16 vlan; struct core_rx_cqe_opaque_data opaque_data; struct parsing_err_flags err_flags; - __le16 reserved0; + u8 packet_source; + u8 reserved0; __le32 reserved1[3]; }; @@ -226,7 +252,8 @@ struct core_rx_gsi_offload_cqe { __le16 qp_id; __le32 src_qp; struct core_rx_cqe_opaque_data opaque_data; - __le32 reserved; + u8 packet_source; + u8 reserved[3]; }; /* Core RX CQE for Light L2 */ @@ -245,6 +272,15 @@ union core_rx_cqe_union { struct core_rx_slow_path_cqe rx_cqe_sp; }; +/* RX packet source. 
*/ +enum core_rx_pkt_source { + CORE_RX_PKT_SOURCE_NETWORK = 0, + CORE_RX_PKT_SOURCE_LB, + CORE_RX_PKT_SOURCE_TX, + CORE_RX_PKT_SOURCE_LL2_TX, + MAX_CORE_RX_PKT_SOURCE +}; + /* Ramrod data for rx queue start ramrod */ struct core_rx_start_ramrod_data { struct regpair bd_base; @@ -362,7 +398,7 @@ struct core_tx_update_ramrod_data { u8 update_qm_pq_id_flg; u8 reserved0; __le16 qm_pq_id; - __le32 reserved1; + __le32 reserved1[1]; }; /* Enum flag for what type of dcb data to update */ @@ -386,224 +422,222 @@ struct pstorm_core_conn_st_ctx { /* Core Slowpath Connection storm context of Xstorm */ struct xstorm_core_conn_st_ctx { - __le32 spq_base_lo; - __le32 spq_base_hi; - struct regpair consolid_base_addr; + struct regpair spq_base_addr; + __le32 reserved0[2]; __le16 spq_cons; - __le16 consolid_cons; - __le32 reserved0[55]; + __le16 reserved1[111]; }; -struct e4_xstorm_core_conn_ag_ctx { +struct xstorm_core_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define 
E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 -#define 
E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define 
XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define 
XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define 
XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 consolid_prod; @@ -657,89 +691,89 @@ struct e4_xstorm_core_conn_ag_ctx { __le16 word15; }; -struct e4_tstorm_core_conn_ag_ctx { +struct tstorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define 
E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define 
TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define 
TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -761,63 +795,63 @@ struct e4_tstorm_core_conn_ag_ctx { __le32 reg10; }; -struct e4_ustorm_core_conn_ag_ctx { +struct ustorm_core_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define 
USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -846,15 +880,15 @@ struct tstorm_core_conn_st_ctx { }; /* core connection context */ -struct e4_core_conn_context { +struct core_conn_context { struct ystorm_core_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_core_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_core_conn_st_ctx xstorm_st_context; - struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context; - struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context; + struct xstorm_core_conn_ag_ctx xstorm_ag_context; + struct tstorm_core_conn_ag_ctx tstorm_ag_context; + struct ustorm_core_conn_ag_ctx ustorm_ag_context; struct mstorm_core_conn_st_ctx mstorm_st_context; struct ustorm_core_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; @@ -930,12 +964,12 @@ struct eth_rx_rate_limit { /* Update RSS indirection table entry command */ struct eth_tstorm_rss_update_data { - u8 valid; u8 vport_id; u8 ind_table_index; - u8 reserved; __le16 ind_table_value; __le16 reserved1; + u8 reserved; + u8 valid; }; struct eth_ustorm_per_pf_stat { @@ -967,19 +1001,20 @@ struct vf_pf_channel_eqe_data { struct regpair msg_addr; }; -/* Event Ring malicious VF data */ -struct malicious_vf_eqe_data { - u8 vf_id; - u8 err_id; - __le16 reserved[3]; -}; - /* Event Ring initial cleanup data */ struct initial_cleanup_eqe_data { u8 vf_id; u8 reserved[7]; }; +/* FW error data */ +struct fw_err_data { + u8 recovery_scope; + u8 err_id; + __le16 entity_id; + u8 reserved[4]; +}; + /* Event Data Union */ union event_ring_data { u8 bytes[8]; @@ -987,8 +1022,8 @@ union event_ring_data { struct iscsi_eqe_data iscsi_info; 
struct iscsi_connect_done_results iscsi_conn_done_info; union rdma_eqe_data rdma_data; - struct malicious_vf_eqe_data malicious_vf; struct initial_cleanup_eqe_data vf_init_cleanup; + struct fw_err_data err_data; }; /* Event Ring Entry */ @@ -1042,6 +1077,15 @@ struct hsi_fp_ver_struct { u8 major_ver_arr[2]; }; +/* Integration Phase */ +enum integ_phase { + INTEG_PHASE_BB_A0_LATEST = 3, + INTEG_PHASE_BB_B0_NO_MCP = 10, + INTEG_PHASE_BB_B0_WITH_MCP = 11, + MAX_INTEG_PHASE +}; + +/* Ports mode */ enum iwarp_ll2_tx_queues { IWARP_LL2_IN_ORDER_TX_QUEUE = 1, IWARP_LL2_ALIGNED_TX_QUEUE, @@ -1050,9 +1094,9 @@ enum iwarp_ll2_tx_queues { MAX_IWARP_LL2_TX_QUEUES }; -/* Malicious VF error ID */ -enum malicious_vf_error_id { - MALICIOUS_VF_NO_ERROR, +/* Function error ID */ +enum func_err_id { + FUNC_NO_ERROR, VF_PF_CHANNEL_NOT_READY, VF_ZONE_MSG_NOT_VALID, VF_ZONE_FUNC_NOT_ENABLED, @@ -1087,13 +1131,33 @@ enum malicious_vf_error_id { CORE_PACKET_SIZE_TOO_LARGE, CORE_ILLEGAL_BD_FLAGS, CORE_GSI_PACKET_VIOLATION, - MAX_MALICIOUS_VF_ERROR_ID, + MAX_FUNC_ERR_ID +}; + +/* FW error handling mode */ +enum fw_err_mode { + FW_ERR_FATAL_ASSERT, + FW_ERR_DRV_REPORT, + MAX_FW_ERR_MODE +}; + +/* FW error recovery scope */ +enum fw_err_recovery_scope { + ERR_SCOPE_INVALID, + ERR_SCOPE_TX_Q, + ERR_SCOPE_RX_Q, + ERR_SCOPE_QP, + ERR_SCOPE_VPORT, + ERR_SCOPE_FUNC, + ERR_SCOPE_PORT, + ERR_SCOPE_ENGINE, + MAX_FW_ERR_RECOVERY_SCOPE }; /* Mstorm non-triggering VF zone */ struct mstorm_non_trigger_vf_zone { struct eth_mstorm_per_queue_stat eth_queue_stat; - struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD]; + struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_RXQ_VF_QUAD]; }; /* Mstorm VF zone */ @@ -1148,7 +1212,7 @@ struct pf_start_tunnel_config { /* Ramrod data for PF start ramrod */ struct pf_start_ramrod_data { struct regpair event_ring_pbl_addr; - struct regpair consolid_q_pbl_addr; + struct regpair consolid_q_pbl_base_addr; struct pf_start_tunnel_config tunnel_config; __le16 event_ring_sb_id; u8 base_vf_id; @@ -1166,6 +1230,9 @@ struct pf_start_ramrod_data { u8 reserved0; struct hsi_fp_ver_struct hsi_fp_ver; struct outer_tag_config_struct outer_tag_config; + u8 pf_fp_err_mode; + u8 consolid_q_num_pages; + u8 reserved[6]; }; /* Data for port update ramrod */ @@ -1230,6 +1297,13 @@ enum ports_mode { MAX_PORTS_MODE }; +/* Protocol-common error code */ +enum protocol_common_error_code { + COMMON_ERR_CODE_OK = 0, + COMMON_ERR_CODE_ERROR, + MAX_PROTOCOL_COMMON_ERROR_CODE +}; + /* use to index in hsi_fp_[major|minor]_ver_arr per protocol */ enum protocol_version_array_key { ETH_VER_KEY = 0, @@ -1525,74 +1599,74 @@ enum dmae_cmd_src_enum { MAX_DMAE_CMD_SRC_ENUM }; -struct e4_mstorm_core_conn_ag_ctx { +struct mstorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define 
MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_ystorm_core_conn_ag_ctx { +struct ystorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define 
E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -1704,6 +1778,7 @@ struct igu_msix_vector { #define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF #define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 }; + /* per encapsulation type enabling flags */ struct prs_reg_encapsulation_type_en { u8 flags; @@ -1778,22 +1853,22 @@ struct qm_rf_opportunistic_mask { }; /* QM hardware structure of QM map memory */ -struct qm_rf_pq_map_e4 { +struct qm_rf_pq_map { __le32 reg; -#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1 -#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0 -#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF -#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1 -#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF -#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9 -#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F -#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18 -#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3 -#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23 -#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1 -#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25 -#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F -#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26 +#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF +#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_VOQ_MASK 0x1F +#define QM_RF_PQ_MAP_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 }; /* Completion params for aggregated interrupt completion */ @@ -1831,769 +1906,6 @@ struct virt_mem_desc { u32 size; /* In bytes */ }; -/****************************************/ -/* Debug Tools HSI constants and macros */ -/****************************************/ - -enum block_id { - BLOCK_GRC, - BLOCK_MISCS, - BLOCK_MISC, - BLOCK_DBU, - BLOCK_PGLUE_B, - BLOCK_CNIG, - BLOCK_CPMU, - BLOCK_NCSI, - BLOCK_OPTE, - BLOCK_BMB, - BLOCK_PCIE, - BLOCK_MCP, - BLOCK_MCP2, - BLOCK_PSWHST, - BLOCK_PSWHST2, - BLOCK_PSWRD, - BLOCK_PSWRD2, - BLOCK_PSWWR, - BLOCK_PSWWR2, - BLOCK_PSWRQ, - BLOCK_PSWRQ2, - BLOCK_PGLCS, - BLOCK_DMAE, - BLOCK_PTU, - BLOCK_TCM, - BLOCK_MCM, - BLOCK_UCM, - BLOCK_XCM, - BLOCK_YCM, - BLOCK_PCM, - BLOCK_QM, - BLOCK_TM, - BLOCK_DORQ, - BLOCK_BRB, - BLOCK_SRC, - BLOCK_PRS, - BLOCK_TSDM, - BLOCK_MSDM, - BLOCK_USDM, - BLOCK_XSDM, - BLOCK_YSDM, - BLOCK_PSDM, - BLOCK_TSEM, - BLOCK_MSEM, - BLOCK_USEM, - 
BLOCK_XSEM, - BLOCK_YSEM, - BLOCK_PSEM, - BLOCK_RSS, - BLOCK_TMLD, - BLOCK_MULD, - BLOCK_YULD, - BLOCK_XYLD, - BLOCK_PRM, - BLOCK_PBF_PB1, - BLOCK_PBF_PB2, - BLOCK_RPB, - BLOCK_BTB, - BLOCK_PBF, - BLOCK_RDIF, - BLOCK_TDIF, - BLOCK_CDU, - BLOCK_CCFC, - BLOCK_TCFC, - BLOCK_IGU, - BLOCK_CAU, - BLOCK_UMAC, - BLOCK_XMAC, - BLOCK_MSTAT, - BLOCK_DBG, - BLOCK_NIG, - BLOCK_WOL, - BLOCK_BMBN, - BLOCK_IPC, - BLOCK_NWM, - BLOCK_NWS, - BLOCK_MS, - BLOCK_PHY_PCIE, - BLOCK_LED, - BLOCK_AVS_WRAP, - BLOCK_PXPREQBUS, - BLOCK_BAR0_MAP, - BLOCK_MCP_FIO, - BLOCK_LAST_INIT, - BLOCK_PRS_FC, - BLOCK_PBF_FC, - BLOCK_NIG_LB_FC, - BLOCK_NIG_LB_FC_PLLH, - BLOCK_NIG_TX_FC_PLLH, - BLOCK_NIG_TX_FC, - BLOCK_NIG_RX_FC_PLLH, - BLOCK_NIG_RX_FC, - MAX_BLOCK_ID -}; - -/* binary debug buffer types */ -enum bin_dbg_buffer_type { - BIN_BUF_DBG_MODE_TREE, - BIN_BUF_DBG_DUMP_REG, - BIN_BUF_DBG_DUMP_MEM, - BIN_BUF_DBG_IDLE_CHK_REGS, - BIN_BUF_DBG_IDLE_CHK_IMMS, - BIN_BUF_DBG_IDLE_CHK_RULES, - BIN_BUF_DBG_IDLE_CHK_PARSING_DATA, - BIN_BUF_DBG_ATTN_BLOCKS, - BIN_BUF_DBG_ATTN_REGS, - BIN_BUF_DBG_ATTN_INDEXES, - BIN_BUF_DBG_ATTN_NAME_OFFSETS, - BIN_BUF_DBG_BLOCKS, - BIN_BUF_DBG_BLOCKS_CHIP_DATA, - BIN_BUF_DBG_BUS_LINES, - BIN_BUF_DBG_BLOCKS_USER_DATA, - BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA, - BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS, - BIN_BUF_DBG_RESET_REGS, - BIN_BUF_DBG_PARSING_STRINGS, - MAX_BIN_DBG_BUFFER_TYPE -}; - - -/* Attention bit mapping */ -struct dbg_attn_bit_mapping { - u16 data; -#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF -#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0 -#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 -#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15 -}; - -/* Attention block per-type data */ -struct dbg_attn_block_type_data { - u16 names_offset; - u16 reserved1; - u8 num_regs; - u8 reserved2; - u16 regs_offset; - -}; - -/* Block attentions */ -struct dbg_attn_block { - struct dbg_attn_block_type_data per_type_data[2]; -}; - -/* Attention register result */ -struct dbg_attn_reg_result { - u32 data; -#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF -#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 -#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF -#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 - u16 block_attn_offset; - u16 reserved; - u32 sts_val; - u32 mask_val; -}; - -/* Attention block result */ -struct dbg_attn_block_result { - u8 block_id; - u8 data; -#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3 -#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0 -#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F -#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2 - u16 names_offset; - struct dbg_attn_reg_result reg_results[15]; -}; - -/* Mode header */ -struct dbg_mode_hdr { - u16 data; -#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 -#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 -#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF -#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1 -}; - -/* Attention register */ -struct dbg_attn_reg { - struct dbg_mode_hdr mode; - u16 block_attn_offset; - u32 data; -#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF -#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 -#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF -#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 - u32 sts_clr_address; - u32 mask_address; -}; - -/* Attention types */ -enum dbg_attn_type { - ATTN_TYPE_INTERRUPT, - ATTN_TYPE_PARITY, - MAX_DBG_ATTN_TYPE -}; - -/* Block debug data */ -struct dbg_block { - u8 name[15]; - u8 associated_storm_letter; -}; - -/* Chip-specific block debug data */ -struct dbg_block_chip { - u8 flags; -#define 
DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1 -#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0 -#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1 -#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1 -#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1 -#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2 -#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1 -#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3 -#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1 -#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4 -#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7 -#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5 - u8 dbg_client_id; - u8 reset_reg_id; - u8 reset_reg_bit_offset; - struct dbg_mode_hdr dbg_bus_mode; - u16 reserved1; - u8 reserved2; - u8 num_of_dbg_bus_lines; - u16 dbg_bus_lines_offset; - u32 dbg_select_reg_addr; - u32 dbg_dword_enable_reg_addr; - u32 dbg_shift_reg_addr; - u32 dbg_force_valid_reg_addr; - u32 dbg_force_frame_reg_addr; -}; - -/* Chip-specific block user debug data */ -struct dbg_block_chip_user { - u8 num_of_dbg_bus_lines; - u8 has_latency_events; - u16 names_offset; -}; - -/* Block user debug data */ -struct dbg_block_user { - u8 name[16]; -}; - -/* Block Debug line data */ -struct dbg_bus_line { - u8 data; -#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF -#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 -#define DBG_BUS_LINE_IS_256B_MASK 0x1 -#define DBG_BUS_LINE_IS_256B_SHIFT 4 -#define DBG_BUS_LINE_RESERVED_MASK 0x7 -#define DBG_BUS_LINE_RESERVED_SHIFT 5 - u8 group_sizes; -}; - -/* Condition header for registers dump */ -struct dbg_dump_cond_hdr { - struct dbg_mode_hdr mode; /* Mode header */ - u8 block_id; /* block ID */ - u8 data_size; /* size in dwords of the data following this header */ -}; - -/* Memory data for registers dump */ -struct dbg_dump_mem { - u32 dword0; -#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF -#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 -#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF -#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 - u32 dword1; -#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF -#define DBG_DUMP_MEM_LENGTH_SHIFT 0 -#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 -#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 -#define DBG_DUMP_MEM_RESERVED_MASK 0x7F -#define DBG_DUMP_MEM_RESERVED_SHIFT 25 -}; - -/* Register data for registers dump */ -struct dbg_dump_reg { - u32 data; -#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF -#define DBG_DUMP_REG_ADDRESS_SHIFT 0 -#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 -#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 -#define DBG_DUMP_REG_LENGTH_MASK 0xFF -#define DBG_DUMP_REG_LENGTH_SHIFT 24 -}; - -/* Split header for registers dump */ -struct dbg_dump_split_hdr { - u32 hdr; -#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF -#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 -#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF -#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 -}; - -/* Condition header for idle check */ -struct dbg_idle_chk_cond_hdr { - struct dbg_mode_hdr mode; /* Mode header */ - u16 data_size; /* size in dwords of the data following this header */ -}; - -/* Idle Check condition register */ -struct dbg_idle_chk_cond_reg { - u32 data; -#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF -#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 -#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 -#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 -#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF -#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 - u16 num_entries; - u8 entry_size; - u8 start_entry; -}; - -/* Idle Check info register */ -struct dbg_idle_chk_info_reg { - u32 data; -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 
0x7FFFFF -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 -#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 -#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 -#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF -#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 - u16 size; /* register size in dwords */ - struct dbg_mode_hdr mode; /* Mode header */ -}; - -/* Idle Check register */ -union dbg_idle_chk_reg { - struct dbg_idle_chk_cond_reg cond_reg; /* condition register */ - struct dbg_idle_chk_info_reg info_reg; /* info register */ -}; - -/* Idle Check result header */ -struct dbg_idle_chk_result_hdr { - u16 rule_id; /* Failing rule index */ - u16 mem_entry_id; /* Failing memory entry index */ - u8 num_dumped_cond_regs; /* number of dumped condition registers */ - u8 num_dumped_info_regs; /* number of dumped condition registers */ - u8 severity; /* from dbg_idle_chk_severity_types enum */ - u8 reserved; -}; - -/* Idle Check result register header */ -struct dbg_idle_chk_result_reg_hdr { - u8 data; -#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1 -#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0 -#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F -#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 - u8 start_entry; /* index of the first checked entry */ - u16 size; /* register size in dwords */ -}; - -/* Idle Check rule */ -struct dbg_idle_chk_rule { - u16 rule_id; /* Idle Check rule ID */ - u8 severity; /* value from dbg_idle_chk_severity_types enum */ - u8 cond_id; /* Condition ID */ - u8 num_cond_regs; /* number of condition registers */ - u8 num_info_regs; /* number of info registers */ - u8 num_imms; /* number of immediates in the condition */ - u8 reserved1; - u16 reg_offset; /* offset of this rules registers in the idle check - * register array (in dbg_idle_chk_reg units). - */ - u16 imm_offset; /* offset of this rules immediate values in the - * immediate values array (in dwords). 
- */ -}; - -/* Idle Check rule parsing data */ -struct dbg_idle_chk_rule_parsing_data { - u32 data; -#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 -#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 -#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF -#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 -}; - -/* Idle check severity types */ -enum dbg_idle_chk_severity_types { - /* idle check failure should cause an error */ - IDLE_CHK_SEVERITY_ERROR, - /* idle check failure should cause an error only if theres no traffic */ - IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, - /* idle check failure should cause a warning */ - IDLE_CHK_SEVERITY_WARNING, - MAX_DBG_IDLE_CHK_SEVERITY_TYPES -}; - -/* Reset register */ -struct dbg_reset_reg { - u32 data; -#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF -#define DBG_RESET_REG_ADDR_SHIFT 0 -#define DBG_RESET_REG_IS_REMOVED_MASK 0x1 -#define DBG_RESET_REG_IS_REMOVED_SHIFT 24 -#define DBG_RESET_REG_RESERVED_MASK 0x7F -#define DBG_RESET_REG_RESERVED_SHIFT 25 -}; - -/* Debug Bus block data */ -struct dbg_bus_block_data { - u8 enable_mask; - u8 right_shift; - u8 force_valid_mask; - u8 force_frame_mask; - u8 dword_mask; - u8 line_num; - u8 hw_id; - u8 flags; -#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1 -#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0 -#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F -#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1 -}; - -enum dbg_bus_clients { - DBG_BUS_CLIENT_RBCN, - DBG_BUS_CLIENT_RBCP, - DBG_BUS_CLIENT_RBCR, - DBG_BUS_CLIENT_RBCT, - DBG_BUS_CLIENT_RBCU, - DBG_BUS_CLIENT_RBCF, - DBG_BUS_CLIENT_RBCX, - DBG_BUS_CLIENT_RBCS, - DBG_BUS_CLIENT_RBCH, - DBG_BUS_CLIENT_RBCZ, - DBG_BUS_CLIENT_OTHER_ENGINE, - DBG_BUS_CLIENT_TIMESTAMP, - DBG_BUS_CLIENT_CPU, - DBG_BUS_CLIENT_RBCY, - DBG_BUS_CLIENT_RBCQ, - DBG_BUS_CLIENT_RBCM, - DBG_BUS_CLIENT_RBCB, - DBG_BUS_CLIENT_RBCW, - DBG_BUS_CLIENT_RBCV, - MAX_DBG_BUS_CLIENTS -}; - -/* Debug Bus constraint operation types */ -enum dbg_bus_constraint_ops { - DBG_BUS_CONSTRAINT_OP_EQ, - DBG_BUS_CONSTRAINT_OP_NE, - DBG_BUS_CONSTRAINT_OP_LT, - DBG_BUS_CONSTRAINT_OP_LTC, - DBG_BUS_CONSTRAINT_OP_LE, - DBG_BUS_CONSTRAINT_OP_LEC, - DBG_BUS_CONSTRAINT_OP_GT, - DBG_BUS_CONSTRAINT_OP_GTC, - DBG_BUS_CONSTRAINT_OP_GE, - DBG_BUS_CONSTRAINT_OP_GEC, - MAX_DBG_BUS_CONSTRAINT_OPS -}; - -/* Debug Bus trigger state data */ -struct dbg_bus_trigger_state_data { - u8 msg_len; - u8 constraint_dword_mask; - u8 storm_id; - u8 reserved; -}; - -/* Debug Bus memory address */ -struct dbg_bus_mem_addr { - u32 lo; - u32 hi; -}; - -/* Debug Bus PCI buffer data */ -struct dbg_bus_pci_buf_data { - struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */ - struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */ - u32 size; /* PCI buffer size in bytes */ -}; - -/* Debug Bus Storm EID range filter params */ -struct dbg_bus_storm_eid_range_params { - u8 min; /* Minimal event ID to filter on */ - u8 max; /* Maximal event ID to filter on */ -}; - -/* Debug Bus Storm EID mask filter params */ -struct dbg_bus_storm_eid_mask_params { - u8 val; /* Event ID value */ - u8 mask; /* Event ID mask. 1s in the mask = dont care bits. 
*/ -}; - -/* Debug Bus Storm EID filter params */ -union dbg_bus_storm_eid_params { - struct dbg_bus_storm_eid_range_params range; - struct dbg_bus_storm_eid_mask_params mask; -}; - -/* Debug Bus Storm data */ -struct dbg_bus_storm_data { - u8 enabled; - u8 mode; - u8 hw_id; - u8 eid_filter_en; - u8 eid_range_not_mask; - u8 cid_filter_en; - union dbg_bus_storm_eid_params eid_filter_params; - u32 cid; -}; - -/* Debug Bus data */ -struct dbg_bus_data { - u32 app_version; - u8 state; - u8 mode_256b_en; - u8 num_enabled_blocks; - u8 num_enabled_storms; - u8 target; - u8 one_shot_en; - u8 grc_input_en; - u8 timestamp_input_en; - u8 filter_en; - u8 adding_filter; - u8 filter_pre_trigger; - u8 filter_post_trigger; - u8 trigger_en; - u8 filter_constraint_dword_mask; - u8 next_trigger_state; - u8 next_constraint_id; - struct dbg_bus_trigger_state_data trigger_states[3]; - u8 filter_msg_len; - u8 rcv_from_other_engine; - u8 blocks_dword_mask; - u8 blocks_dword_overlap; - u32 hw_id_mask; - struct dbg_bus_pci_buf_data pci_buf; - struct dbg_bus_block_data blocks[132]; - struct dbg_bus_storm_data storms[6]; -}; - -/* Debug bus states */ -enum dbg_bus_states { - DBG_BUS_STATE_IDLE, - DBG_BUS_STATE_READY, - DBG_BUS_STATE_RECORDING, - DBG_BUS_STATE_STOPPED, - MAX_DBG_BUS_STATES -}; - -/* Debug Bus Storm modes */ -enum dbg_bus_storm_modes { - DBG_BUS_STORM_MODE_PRINTF, - DBG_BUS_STORM_MODE_PRAM_ADDR, - DBG_BUS_STORM_MODE_DRA_RW, - DBG_BUS_STORM_MODE_DRA_W, - DBG_BUS_STORM_MODE_LD_ST_ADDR, - DBG_BUS_STORM_MODE_DRA_FSM, - DBG_BUS_STORM_MODE_FAST_DBGMUX, - DBG_BUS_STORM_MODE_RH, - DBG_BUS_STORM_MODE_RH_WITH_STORE, - DBG_BUS_STORM_MODE_FOC, - DBG_BUS_STORM_MODE_EXT_STORE, - MAX_DBG_BUS_STORM_MODES -}; - -/* Debug bus target IDs */ -enum dbg_bus_targets { - DBG_BUS_TARGET_ID_INT_BUF, - DBG_BUS_TARGET_ID_NIG, - DBG_BUS_TARGET_ID_PCI, - MAX_DBG_BUS_TARGETS -}; - -/* GRC Dump data */ -struct dbg_grc_data { - u8 params_initialized; - u8 reserved1; - u16 reserved2; - u32 param_val[48]; -}; - -/* Debug GRC params */ -enum dbg_grc_params { - DBG_GRC_PARAM_DUMP_TSTORM, - DBG_GRC_PARAM_DUMP_MSTORM, - DBG_GRC_PARAM_DUMP_USTORM, - DBG_GRC_PARAM_DUMP_XSTORM, - DBG_GRC_PARAM_DUMP_YSTORM, - DBG_GRC_PARAM_DUMP_PSTORM, - DBG_GRC_PARAM_DUMP_REGS, - DBG_GRC_PARAM_DUMP_RAM, - DBG_GRC_PARAM_DUMP_PBUF, - DBG_GRC_PARAM_DUMP_IOR, - DBG_GRC_PARAM_DUMP_VFC, - DBG_GRC_PARAM_DUMP_CM_CTX, - DBG_GRC_PARAM_DUMP_PXP, - DBG_GRC_PARAM_DUMP_RSS, - DBG_GRC_PARAM_DUMP_CAU, - DBG_GRC_PARAM_DUMP_QM, - DBG_GRC_PARAM_DUMP_MCP, - DBG_GRC_PARAM_DUMP_DORQ, - DBG_GRC_PARAM_DUMP_CFC, - DBG_GRC_PARAM_DUMP_IGU, - DBG_GRC_PARAM_DUMP_BRB, - DBG_GRC_PARAM_DUMP_BTB, - DBG_GRC_PARAM_DUMP_BMB, - DBG_GRC_PARAM_RESERVD1, - DBG_GRC_PARAM_DUMP_MULD, - DBG_GRC_PARAM_DUMP_PRS, - DBG_GRC_PARAM_DUMP_DMAE, - DBG_GRC_PARAM_DUMP_TM, - DBG_GRC_PARAM_DUMP_SDM, - DBG_GRC_PARAM_DUMP_DIF, - DBG_GRC_PARAM_DUMP_STATIC, - DBG_GRC_PARAM_UNSTALL, - DBG_GRC_PARAM_RESERVED2, - DBG_GRC_PARAM_MCP_TRACE_META_SIZE, - DBG_GRC_PARAM_EXCLUDE_ALL, - DBG_GRC_PARAM_CRASH, - DBG_GRC_PARAM_PARITY_SAFE, - DBG_GRC_PARAM_DUMP_CM, - DBG_GRC_PARAM_DUMP_PHY, - DBG_GRC_PARAM_NO_MCP, - DBG_GRC_PARAM_NO_FW_VER, - DBG_GRC_PARAM_RESERVED3, - DBG_GRC_PARAM_DUMP_MCP_HW_DUMP, - DBG_GRC_PARAM_DUMP_ILT_CDUC, - DBG_GRC_PARAM_DUMP_ILT_CDUT, - DBG_GRC_PARAM_DUMP_CAU_EXT, - MAX_DBG_GRC_PARAMS -}; - -/* Debug status codes */ -enum dbg_status { - DBG_STATUS_OK, - DBG_STATUS_APP_VERSION_NOT_SET, - DBG_STATUS_UNSUPPORTED_APP_VERSION, - DBG_STATUS_DBG_BLOCK_NOT_RESET, - DBG_STATUS_INVALID_ARGS, - 
DBG_STATUS_OUTPUT_ALREADY_SET, - DBG_STATUS_INVALID_PCI_BUF_SIZE, - DBG_STATUS_PCI_BUF_ALLOC_FAILED, - DBG_STATUS_PCI_BUF_NOT_ALLOCATED, - DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS, - DBG_STATUS_NO_MATCHING_FRAMING_MODE, - DBG_STATUS_VFC_READ_ERROR, - DBG_STATUS_STORM_ALREADY_ENABLED, - DBG_STATUS_STORM_NOT_ENABLED, - DBG_STATUS_BLOCK_ALREADY_ENABLED, - DBG_STATUS_BLOCK_NOT_ENABLED, - DBG_STATUS_NO_INPUT_ENABLED, - DBG_STATUS_NO_FILTER_TRIGGER_256B, - DBG_STATUS_FILTER_ALREADY_ENABLED, - DBG_STATUS_TRIGGER_ALREADY_ENABLED, - DBG_STATUS_TRIGGER_NOT_ENABLED, - DBG_STATUS_CANT_ADD_CONSTRAINT, - DBG_STATUS_TOO_MANY_TRIGGER_STATES, - DBG_STATUS_TOO_MANY_CONSTRAINTS, - DBG_STATUS_RECORDING_NOT_STARTED, - DBG_STATUS_DATA_DIDNT_TRIGGER, - DBG_STATUS_NO_DATA_RECORDED, - DBG_STATUS_DUMP_BUF_TOO_SMALL, - DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED, - DBG_STATUS_UNKNOWN_CHIP, - DBG_STATUS_VIRT_MEM_ALLOC_FAILED, - DBG_STATUS_BLOCK_IN_RESET, - DBG_STATUS_INVALID_TRACE_SIGNATURE, - DBG_STATUS_INVALID_NVRAM_BUNDLE, - DBG_STATUS_NVRAM_GET_IMAGE_FAILED, - DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE, - DBG_STATUS_NVRAM_READ_FAILED, - DBG_STATUS_IDLE_CHK_PARSE_FAILED, - DBG_STATUS_MCP_TRACE_BAD_DATA, - DBG_STATUS_MCP_TRACE_NO_META, - DBG_STATUS_MCP_COULD_NOT_HALT, - DBG_STATUS_MCP_COULD_NOT_RESUME, - DBG_STATUS_RESERVED0, - DBG_STATUS_SEMI_FIFO_NOT_EMPTY, - DBG_STATUS_IGU_FIFO_BAD_DATA, - DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, - DBG_STATUS_FW_ASSERTS_PARSE_FAILED, - DBG_STATUS_REG_FIFO_BAD_DATA, - DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, - DBG_STATUS_DBG_ARRAY_NOT_SET, - DBG_STATUS_RESERVED1, - DBG_STATUS_NON_MATCHING_LINES, - DBG_STATUS_INSUFFICIENT_HW_IDS, - DBG_STATUS_DBG_BUS_IN_USE, - DBG_STATUS_INVALID_STORM_DBG_MODE, - DBG_STATUS_OTHER_ENGINE_BB_ONLY, - DBG_STATUS_FILTER_SINGLE_HW_ID, - DBG_STATUS_TRIGGER_SINGLE_HW_ID, - DBG_STATUS_MISSING_TRIGGER_STATE_STORM, - MAX_DBG_STATUS -}; - -/* Debug Storms IDs */ -enum dbg_storms { - DBG_TSTORM_ID, - DBG_MSTORM_ID, - DBG_USTORM_ID, - DBG_XSTORM_ID, - DBG_YSTORM_ID, - DBG_PSTORM_ID, - MAX_DBG_STORMS -}; - -/* Idle Check data */ -struct idle_chk_data { - u32 buf_size; - u8 buf_size_set; - u8 reserved1; - u16 reserved2; -}; - -struct pretend_params { - u8 split_type; - u8 reserved; - u16 split_id; -}; - -/* Debug Tools data (per HW function) - */ -struct dbg_tools_data { - struct dbg_grc_data grc; - struct dbg_bus_data bus; - struct idle_chk_data idle_chk; - u8 mode_enable[40]; - u8 block_in_reset[132]; - u8 chip_id; - u8 hw_type; - u8 num_ports; - u8 num_pfs_per_port; - u8 num_vfs; - u8 initialized; - u8 use_dmae; - u8 reserved; - struct pretend_params pretend; - u32 num_regs_read; -}; - -/* ILT Clients */ -enum ilt_clients { - ILT_CLI_CDUC, - ILT_CLI_CDUT, - ILT_CLI_QM, - ILT_CLI_TM, - ILT_CLI_SRC, - ILT_CLI_TSDM, - ILT_CLI_RGFS, - ILT_CLI_TGFS, - MAX_ILT_CLIENTS -}; - /********************************/ /* HSI Init Functions constants */ /********************************/ @@ -2644,6 +1956,9 @@ struct init_nig_pri_tc_map_req { /* QM per global RL init parameters */ struct init_qm_global_rl_params { + u8 type; + u8 reserved0; + u16 reserved1; u32 rate_limit; }; @@ -2658,18 +1973,33 @@ struct init_qm_port_params { /* QM per-PQ init parameters */ struct init_qm_pq_params { - u8 vport_id; + u16 vport_id; + u16 rl_id; + u8 rl_valid; u8 tc_id; u8 wrr_group; - u8 rl_valid; - u16 rl_id; u8 port_id; - u8 reserved; +}; + +/* QM per RL init parameters */ +struct init_qm_rl_params { + u32 vport_rl; + u8 vport_rl_type; + u8 reserved[3]; +}; + +/* QM Rate Limiter types */ +enum init_qm_rl_type { + 
QM_RL_TYPE_NORMAL, + QM_RL_TYPE_QCN, + MAX_INIT_QM_RL_TYPE }; /* QM per-vport init parameters */ struct init_qm_vport_params { u16 wfq; + u16 reserved; + u16 tc_wfq[NUM_OF_TCS]; u16 first_tx_pq_id[NUM_OF_TCS]; }; @@ -2728,14 +2058,14 @@ struct fw_info_location { }; enum init_modes { - MODE_RESERVED, + MODE_BB_A0_DEPRECATED, MODE_BB, MODE_K2, MODE_ASIC, - MODE_RESERVED2, - MODE_RESERVED3, - MODE_RESERVED4, - MODE_RESERVED5, + MODE_EMUL_REDUCED, + MODE_EMUL_FULL, + MODE_FPGA, + MODE_CHIPSIM, MODE_SF, MODE_MF_SD, MODE_MF_SI, @@ -2743,8 +2073,8 @@ enum init_modes { MODE_PORTS_PER_ENG_2, MODE_PORTS_PER_ENG_4, MODE_100G, - MODE_RESERVED6, - MODE_RESERVED7, + MODE_SKIP_PRAM_INIT, + MODE_EMUL_MAC, MAX_INIT_MODES }; @@ -3009,706 +2339,6 @@ struct iro { u16 size; }; -/***************************** Public Functions *******************************/ - -/** - * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug - * arrays. - * - * @param p_hwfn - HW device data - * @param bin_ptr - a pointer to the binary data with debug arrays. - */ -enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, - const u8 * const bin_ptr); - -/** - * @brief qed_read_regs - Reads registers into a buffer (using GRC). - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf - Destination buffer. - * @param addr - Source GRC address in dwords. - * @param len - Number of registers to read. - */ -void qed_read_regs(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); - -/** - * @brief qed_read_fw_info - Reads FW info from the chip. - * - * The FW info contains FW-related information, such as the FW version, - * FW image (main/L2B/kuku), FW timestamp, etc. - * The FW info is read from the internal RAM of the first Storm that is not in - * reset. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param fw_info - Out: a pointer to write the FW info into. - * - * @return true if the FW info was read successfully from one of the Storms, - * or false if all Storms are in reset. - */ -bool qed_read_fw_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, struct fw_info *fw_info); -/** - * @brief qed_dbg_grc_config - Sets the value of a GRC parameter. - * - * @param p_hwfn - HW device data - * @param grc_param - GRC parameter - * @param val - Value to set. - * - * @return error if one of the following holds: - * - the version wasn't set - * - grc_param is invalid - * - val is outside the allowed boundaries - */ -enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, - enum dbg_grc_params grc_param, u32 val); - -/** - * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their - * default value. - * - * @param p_hwfn - HW device data - */ -void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); -/** - * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for - * GRC Dump. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump - * data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); - -/** - * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer. 
- * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the collected GRC data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified dump buffer is too small - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size - * for idle check results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the idle check - * data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); - -/** - * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the idle check data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size - * for mcp trace results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the trace data in MCP scratchpad contain an invalid signature - * - the bundle ID in NVRAM is invalid - * - the trace meta data cannot be found (in NVRAM or image file) - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); - -/** - * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the mcp trace data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - the trace data in MCP scratchpad contain an invalid signature - * - the bundle ID in NVRAM is invalid - * - the trace meta data cannot be found (in NVRAM or image file) - * - the trace meta data cannot be read (from NVRAM or image file) - * Otherwise, returns ok. 
- */ -enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size - * for grc trace fifo results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); - -/** - * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into - * the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the reg fifo data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size - * for the IGU fifo results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo - * data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); - -/** - * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into - * the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the IGU fifo data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required - * buffer size for protection override window results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for protection - * override data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status -qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); -/** - * @brief qed_dbg_protection_override_dump - Reads protection override window - * entries and writes the results into the specified buffer. 
- * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the protection override data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); -/** - * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer - * size for FW Asserts results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); -/** - * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the FW Asserts data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_read_attn - Reads the attention registers of the specified - * block and type, and writes the results into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param block - Block ID. - * @param attn_type - Attention type. - * @param clear_status - Indicates if the attention status should be cleared. - * @param results - OUT: Pointer to write the read results into - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum block_id block, - enum dbg_attn_type attn_type, - bool clear_status, - struct dbg_attn_block_result *results); - -/** - * @brief qed_dbg_print_attn - Prints attention registers values in the - * specified results struct. - * - * @param p_hwfn - * @param results - Pointer to the attention read results - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. 
- */ -enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, - struct dbg_attn_block_result *results); - -/******************************* Data Types **********************************/ - -struct mcp_trace_format { - u32 data; -#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff -#define MCP_TRACE_FORMAT_MODULE_OFFSET 0 -#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 -#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16 -#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 -#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18 -#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 -#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20 -#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 -#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22 -#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 -#define MCP_TRACE_FORMAT_LEN_OFFSET 24 - - char *format_str; -}; - -/* MCP Trace Meta data structure */ -struct mcp_trace_meta { - u32 modules_num; - char **modules; - u32 formats_num; - struct mcp_trace_format *formats; - bool is_allocated; -}; - -/* Debug Tools user data */ -struct dbg_tools_user_data { - struct mcp_trace_meta mcp_trace_meta; - const u32 *mcp_trace_user_meta_buf; -}; - -/******************************** Constants **********************************/ - -#define MAX_NAME_LEN 16 - -/***************************** Public Functions *******************************/ - -/** - * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with - * debug arrays. - * - * @param p_hwfn - HW device data - * @param bin_ptr - a pointer to the binary data with debug arrays. - */ -enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, - const u8 * const bin_ptr); - -/** - * @brief qed_dbg_alloc_user_data - Allocates user debug data. - * - * @param p_hwfn - HW device data - * @param user_data_ptr - OUT: a pointer to the allocated memory. - */ -enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, - void **user_data_ptr); - -/** - * @brief qed_dbg_get_status_str - Returns a string for the specified status. - * - * @param status - a debug status code. - * - * @return a string for the specified status - */ -const char *qed_dbg_get_status_str(enum dbg_status status); - -/** - * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size - * for idle check results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - idle check dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); -/** - * @brief qed_print_idle_chk_results - Prints idle check results - * - * @param p_hwfn - HW device data - * @param dump_buf - idle check dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the idle check results. - * @param num_errors - OUT: number of errors found in idle check. - * @param num_warnings - OUT: number of warnings found in idle check. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf, - u32 *num_errors, - u32 *num_warnings); - -/** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. 
- * - * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to - * no NVRAM access). - * - * @param data - pointer to MCP Trace meta data - * @param size - size of MCP Trace meta data in dwords - */ -void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, - const u32 *meta_buf); - -/** - * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size - * for MCP Trace results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - MCP Trace dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_mcp_trace_results - Prints MCP Trace results - * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the mcp trace results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and - * keeps the MCP trace meta data allocated, to support continuous MCP Trace - * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should - * be called to free the meta data. - * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param results_buf - buffer for printing the mcp trace results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - char *results_buf); - -/** - * @brief print_mcp_trace_line - Prints MCP Trace results for a single line - * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param num_dumped_bytes - number of bytes that were dumped. - * @param results_buf - buffer for printing the mcp trace results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, - u8 *dump_buf, - u32 num_dumped_bytes, - char *results_buf); - -/** - * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data. - * Should be called after continuous MCP Trace parsing. - * - * @param p_hwfn - HW device data - */ -void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); - -/** - * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size - * for reg_fifo results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - reg fifo dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_reg_fifo_results - Prints reg fifo results - * - * @param p_hwfn - HW device data - * @param dump_buf - reg fifo dump buffer, starting from the header. 
- * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the reg fifo results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size - * for igu_fifo results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - IGU fifo dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_igu_fifo_results - Prints IGU fifo results - * - * @param p_hwfn - HW device data - * @param dump_buf - IGU fifo dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the IGU fifo results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_get_protection_override_results_buf_size - Returns the required - * buffer size for protection override results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - protection override dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status -qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_protection_override_results - Prints protection override - * results. - * - * @param p_hwfn - HW device data - * @param dump_buf - protection override dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the reg fifo results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size - * for FW Asserts results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - FW Asserts dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_fw_asserts_results - Prints FW Asserts results - * - * @param p_hwfn - HW device data - * @param dump_buf - FW Asserts dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the FW Asserts results. - * - * @return error if the parsing fails, ok otherwise. 
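The user-side parsers above mirror that pattern, except the required size is in bytes and the output is text. A hedged sketch for the reg-fifo case (the wrapper and the pr_debug() call are illustrative only):

static int example_parse_reg_fifo(struct qed_hwfn *p_hwfn, u32 *dump_buf,
				  u32 num_dumped_dwords)
{
	u32 results_buf_size = 0;
	char *results;

	if (qed_get_reg_fifo_results_buf_size(p_hwfn, dump_buf,
					      num_dumped_dwords,
					      &results_buf_size) != DBG_STATUS_OK)
		return -EINVAL;

	results = vzalloc(results_buf_size);
	if (!results)
		return -ENOMEM;

	if (qed_print_reg_fifo_results(p_hwfn, dump_buf, num_dumped_dwords,
				       results) != DBG_STATUS_OK) {
		vfree(results);
		return -EINVAL;
	}

	pr_debug("%s", results);	/* parsed reg-fifo entries as text */
	vfree(results);
	return 0;
}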
- */ -enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_dbg_parse_attn - Parses and prints attention registers values in - * the specified results struct. - * - * @param p_hwfn - HW device data - * @param results - Pointer to the attention read results - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, - struct dbg_attn_block_result *results); - /* Win 2 */ #define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL @@ -3745,19 +2375,28 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, /* Win 13 */ #define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL +/* Returns the VOQ based on port and TC */ +#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \ + PURE_LB_TC ? NUM_OF_PHYS_TCS *\ + MAX_NUM_PORTS_BB + \ + (port) : (port) * \ + (max_phys_tcs_per_port) + (tc)) + +struct init_qm_pq_params; + /** - * @brief qed_qm_pf_mem_size - prepare QM ILT sizes + * qed_qm_pf_mem_size(): Prepare QM ILT sizes. * - * Returns the required host memory size in 4KB units. - * Must be called before all QM init HSI functions. + * @num_pf_cids: Number of connections used by this PF. + * @num_vf_cids: Number of connections used by VFs of this PF. + * @num_tids: Number of tasks used by this PF. + * @num_pf_pqs: Number of PQs used by this PF. + * @num_vf_pqs: Number of PQs used by VFs of this PF. * - * @param num_pf_cids - number of connections used by this PF - * @param num_vf_cids - number of connections used by VFs of this PF - * @param num_tids - number of tasks used by this PF - * @param num_pf_pqs - number of PQs used by this PF - * @param num_vf_pqs - number of PQs used by VFs of this PF + * Return: The required host memory size in 4KB units. * - * @return The required host memory size in 4KB units. + * Returns the required host memory size in 4KB units. + * Must be called before all QM init HSI functions. */ u32 qed_qm_pf_mem_size(u32 num_pf_cids, u32 num_vf_cids, @@ -3771,8 +2410,19 @@ struct qed_qm_common_rt_init_params { bool global_rl_en; bool vport_wfq_en; struct init_qm_port_params *port_params; + struct init_qm_global_rl_params + global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]; }; +/** + * qed_qm_common_rt_init(): Prepare QM runtime init values for the + * engine phase. + * + * @p_hwfn: HW device data. + * @p_params: Parameters. + * + * Return: 0 on success, -1 on error. + */ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, struct qed_qm_common_rt_init_params *p_params); @@ -3789,85 +2439,116 @@ struct qed_qm_pf_rt_init_params { u16 num_vf_pqs; u16 start_vport; u16 num_vports; + u16 start_rl; + u16 num_rls; u16 pf_wfq; u32 pf_rl; + u32 link_speed; struct init_qm_pq_params *pq_params; struct init_qm_vport_params *vport_params; + struct init_qm_rl_params *rl_params; }; +/** + * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @p_params: Parameters. + * + * Return: 0 on success, -1 on error. + */ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_qm_pf_rt_init_params *p_params); + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params); /** - * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF + * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF. 
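Worked example for the VOQ() macro above (numbers are illustrative, not from the patch): for a physical TC the macro reduces to port * max_phys_tcs_per_port + tc, so with 4 physical TCs per port, port 1 / TC 2 maps to VOQ 1 * 4 + 2 = 6; the pure-LB TC instead maps to NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port, i.e. one dedicated loopback VOQ per port, placed after all physical-TC VOQs.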
* - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param pf_id - PF ID - * @param pf_wfq - WFQ weight. Must be non-zero. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @pf_id: PF ID + * @pf_wfq: WFQ weight. Must be non-zero. * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq); /** - * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF + * qed_init_pf_rl(): Initializes the rate limit of the specified PF * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param pf_id - PF ID - * @param pf_rl - rate limit in Mb/sec units + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF ID. + * @pf_rl: rate limit in Mb/sec units * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl); /** - * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT + * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param first_tx_pq_id- An array containing the first Tx PQ ID associated - * with the VPORT for each TC. This array is filled by - * qed_qm_pf_rt_init - * @param vport_wfq - WFQ weight. Must be non-zero. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @first_tx_pq_id: An array containing the first Tx PQ ID associated + * with the VPORT for each TC. This array is filled by + * qed_qm_pf_rt_init + * @wfq: WFQ weight. Must be non-zero. * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq); /** - * @brief qed_init_global_rl - Initializes the rate limit of the specified - * rate limiter + * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified + * VPORT and TC. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param rl_id - RL ID - * @param rate_limit - rate limit in Mb/sec units + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC. + * (filled by qed_qm_pf_rt_init). + * @weight: VPORT+TC WFQ weight. * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. + */ +int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 first_tx_pq_id, u16 weight); + +/** + * qed_init_global_rl(): Initializes the rate limit of the specified + * rate limiter. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @rl_id: RL ID. + * @rate_limit: Rate limit in Mb/sec units + * @vport_rl_type: Vport RL type. + * + * Return: 0 on success, -1 on error. */ int qed_init_global_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 rl_id, u32 rate_limit); + u16 rl_id, u32 rate_limit, + enum init_qm_rl_type vport_rl_type); /** - * @brief qed_send_qm_stop_cmd Sends a stop command to the QM + * qed_send_qm_stop_cmd(): Sends a stop command to the QM. * - * @param p_hwfn - * @param p_ptt - * @param is_release_cmd - true for release, false for stop. 
- * @param is_tx_pq - true for Tx PQs, false for Other PQs. - * @param start_pq - first PQ ID to stop - * @param num_pqs - Number of PQs to stop, starting from start_pq. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @is_release_cmd: true for release, false for stop. + * @is_tx_pq: true for Tx PQs, false for Other PQs. + * @start_pq: first PQ ID to stop + * @num_pqs: Number of PQs to stop, starting from start_pq. * - * @return bool, true if successful, false if timeout occurred while waiting for - * QM command done. + * Return: Bool, true if successful, false if timeout occurred while waiting + * for QM command done. */ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3875,53 +2556,64 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, bool is_tx_pq, u16 start_pq, u16 num_pqs); /** - * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port + * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param dest_port - vxlan destination udp port. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: vxlan destination udp port. + * + * Return: Void. */ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port); /** - * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW + * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @vxlan_enable: vxlan enable flag. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param vxlan_enable - vxlan enable flag. + * Return: Void. */ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool vxlan_enable); /** - * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW + * qed_set_gre_enable(): Enable or disable GRE tunnel in HW. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param eth_gre_enable - eth GRE enable enable flag. - * @param ip_gre_enable - IP GRE enable enable flag. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_gre_enable: Eth GRE enable flag. + * @ip_gre_enable: IP GRE enable flag. + * + * Return: Void. */ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_gre_enable, bool ip_gre_enable); /** - * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port + * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: Geneve destination udp port. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param dest_port - geneve destination udp port. + * Return: Void. */ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port); /** - * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW + * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_geneve_enable: Eth GENEVE enable flag. + * @ip_geneve_enable: IP GENEVE enable flag. * - * @param p_ptt - ptt window used for writing the registers. - * @param eth_geneve_enable - eth GENEVE enable enable flag.
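A hedged usage sketch for the two VXLAN helpers declared above (the wrapper, and the assumption that the UDP port is programmed before enabling, are illustrative rather than mandated by this header):

static void example_enable_vxlan(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, u16 udp_port)
{
	/* Program the tunnel's UDP destination port, then turn VXLAN on. */
	qed_set_vxlan_dest_port(p_hwfn, p_ptt, udp_port);
	qed_set_vxlan_enable(p_hwfn, p_ptt, true);
}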
- * @param ip_geneve_enable - IP GENEVE enable enable flag. + * Return: Void. */ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3931,25 +2623,29 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool enable); /** - * @brief qed_gft_disable - Disable GFT + * qed_gft_disable(): Disable GFT. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param pf_id - pf on which to disable GFT. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to disable GFT. + * + * Return: Void. */ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id); /** - * @brief qed_gft_config - Enable and configure HW for GFT + * qed_gft_config(): Enable and configure HW for GFT. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to enable GFT. + * @tcp: Set profile tcp packets. + * @udp: Set profile udp packet. + * @ipv4: Set profile ipv4 packet. + * @ipv6: Set profile ipv6 packet. + * @profile_type: Define packet same fields. Use enum gft_profile_type. * - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers. - * @param pf_id - pf on which to enable GFT. - * @param tcp - set profile tcp packets. - * @param udp - set profile udp packet. - * @param ipv4 - set profile ipv4 packet. - * @param ipv6 - set profile ipv6 packet. - * @param profile_type - define packet same fields. Use enum gft_profile_type. + * Return: Void. */ void qed_gft_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3959,438 +2655,135 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, bool ipv4, bool ipv6, enum gft_profile_type profile_type); /** - * @brief qed_enable_context_validation - Enable and configure context - * validation. + * qed_enable_context_validation(): Enable and configure context + * validation. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. + * Return: Void. */ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_calc_session_ctx_validation - Calcualte validation byte for - * session context. + * qed_calc_session_ctx_validation(): Calculate validation byte for + * session context. * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - context size. - * @param ctx_type - context type. - * @param cid - context cid. + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @cid: Context cid. + * + * Return: Void. */ void qed_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 cid); /** - * @brief qed_calc_task_ctx_validation - Calcualte validation byte for task - * context. + * qed_calc_task_ctx_validation(): Calculate validation byte for task + * context. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @tid: Context tid. * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - context size. - * @param ctx_type - context type. - * @param tid - context tid. + * Return: Void. */ void qed_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 tid); /** - * @brief qed_memset_session_ctx - Memset session context to 0 while - * preserving validation bytes.
+ * qed_memset_session_ctx(): Memset session context to 0 while + * preserving validation bytes. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Size to initialize. + * @ctx_type: Context type. * - * @param p_hwfn - - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - size to initialzie. - * @param ctx_type - context type. + * Return: Void. */ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); /** - * @brief qed_memset_task_ctx - Memset task context to 0 while preserving - * validation bytes. + * qed_memset_task_ctx(): Memset task context to 0 while preserving + * validation bytes. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: size to initialize. + * @ctx_type: context type. * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - size to initialzie. - * @param ctx_type - context type. + * Return: Void. */ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); #define NUM_STORMS 6 /** - * @brief qed_set_rdma_error_level - Sets the RDMA assert level. - * If the severity of the error will be - * above the level, the FW will assert. - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers - * @param assert_level - An array of assert levels for each storm. + * qed_set_rdma_error_level(): Sets the RDMA assert level. + * If the severity of the error is + * above the level, the FW will assert. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @assert_level: An array of assert levels for each storm. * + * Return: Void. */ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 assert_level[NUM_STORMS]); /** - * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory. + * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory. * - * @param p_hwfn - HW device data - * @param fw_overlay_in_buf - the input FW overlay buffer. - * @param buf_size - the size of the input FW overlay buffer in bytes. - * must be aligned to dwords. - * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory. + * @p_hwfn: HW device data. + * @fw_overlay_in_buf: The input FW overlay buffer. + * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes. + * Must be aligned to dwords. * - * @return a pointer to the allocated overlays memory, + * Return: A pointer to the allocated overlays memory, * or NULL in case of failures. */ struct phys_mem_desc * qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, - const u32 * const fw_overlay_in_buf, + const u32 *const fw_overlay_in_buf, u32 buf_size_in_bytes); /** - * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM. + * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_overlay_mem: the allocated FW overlay memory. * - * @param p_hwfn - HW device data. - * @param p_ptt - ptt window used for writing the registers. - * @param fw_overlay_mem - the allocated FW overlay memory. + * Return: Void. */ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct phys_mem_desc *fw_overlay_mem); /** - * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory. + * qed_fw_overlay_mem_free(): Frees the FW overlay memory. * - * @param p_hwfn - HW device data. - * @param fw_overlay_mem - the allocated FW overlay memory to free. + * @p_hwfn: HW device data.
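A minimal sketch of the FW overlay lifecycle built from qed_fw_overlay_mem_alloc()/qed_fw_overlay_init_ram() above and qed_fw_overlay_mem_free() declared just below; the wrapper and its parameters are illustrative only (the overlay buffer normally comes from the firmware image):

static int example_fw_overlay_setup(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    const u32 *fw_overlay_in_buf,
				    u32 buf_size_in_bytes)
{
	struct phys_mem_desc *overlays;

	overlays = qed_fw_overlay_mem_alloc(p_hwfn, fw_overlay_in_buf,
					    buf_size_in_bytes);
	if (!overlays)
		return -ENOMEM;

	qed_fw_overlay_init_ram(p_hwfn, p_ptt, overlays);

	/* ... on teardown; note the double-pointer in the new prototype. */
	qed_fw_overlay_mem_free(p_hwfn, &overlays);
	return 0;
}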
+ * @fw_overlay_mem: The allocated FW overlay memory to free. + * + * Return: Void. */ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, - struct phys_mem_desc *fw_overlay_mem); + struct phys_mem_desc **fw_overlay_mem); -/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */ -#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) -#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) - -/* Tstorm port statistics */ -#define TSTORM_PORT_STAT_OFFSET(port_id) \ - (IRO[1].base + ((port_id) * IRO[1].m1)) -#define TSTORM_PORT_STAT_SIZE (IRO[1].size) - -/* Tstorm ll2 port statistics */ -#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ - (IRO[2].base + ((port_id) * IRO[2].m1)) -#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) - -/* Ustorm VF-PF Channel ready flag */ -#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ - (IRO[3].base + ((vf_id) * IRO[3].m1)) -#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) - -/* Ustorm Final flr cleanup ack */ -#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \ - (IRO[4].base + ((pf_id) * IRO[4].m1)) -#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) - -/* Ustorm Event ring consumer */ -#define USTORM_EQE_CONS_OFFSET(pf_id) \ - (IRO[5].base + ((pf_id) * IRO[5].m1)) -#define USTORM_EQE_CONS_SIZE (IRO[5].size) - -/* Ustorm eth queue zone */ -#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \ - (IRO[6].base + ((queue_zone_id) * IRO[6].m1)) -#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size) - -/* Ustorm Common Queue ring consumer */ -#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \ - (IRO[7].base + ((queue_zone_id) * IRO[7].m1)) -#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) - -/* Xstorm common PQ info */ -#define XSTORM_PQ_INFO_OFFSET(pq_id) \ - (IRO[8].base + ((pq_id) * IRO[8].m1)) -#define XSTORM_PQ_INFO_SIZE (IRO[8].size) - -/* Xstorm Integration Test Data */ -#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) -#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) - -/* Ystorm Integration Test Data */ -#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) -#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) - -/* Pstorm Integration Test Data */ -#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) -#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) - -/* Tstorm Integration Test Data */ -#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) -#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) - -/* Mstorm Integration Test Data */ -#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base) -#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size) - -/* Ustorm Integration Test Data */ -#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base) -#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size) - -/* Xstorm overlay buffer host address */ -#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base) -#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size) - -/* Ystorm overlay buffer host address */ -#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base) -#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size) - -/* Pstorm overlay buffer host address */ -#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base) -#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size) - -/* Tstorm overlay buffer host address */ -#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base) -#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size) - -/* Mstorm overlay buffer host address */ -#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base) -#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size) - -/* Ustorm overlay buffer host address */ -#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base) -#define USTORM_OVERLAY_BUF_ADDR_SIZE 
(IRO[20].size) - -/* Tstorm producers */ -#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ - (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1)) -#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size) - -/* Tstorm LightL2 queue statistics */ -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ - (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1)) -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size) - -/* Ustorm LiteL2 queue statistics */ -#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ - (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1)) -#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size) - -/* Pstorm LiteL2 queue statistics */ -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ - (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1)) -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size) - -/* Mstorm queue statistics */ -#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[25].base + ((stat_counter_id) * IRO[25].m1)) -#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size) - -/* TPA agregation timeout in us resolution (on ASIC) */ -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size) - -/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size - * mode - */ -#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ - (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2)) -#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size) - -/* Mstorm ETH PF queues producers */ -#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ - (IRO[28].base + ((queue_id) * IRO[28].m1)) -#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size) - -/* Mstorm pf statistics */ -#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[29].base + ((pf_id) * IRO[29].m1)) -#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size) - -/* Ustorm queue statistics */ -#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[30].base + ((stat_counter_id) * IRO[30].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[30].size) - -/* Ustorm pf statistics */ -#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[31].base + ((pf_id) * IRO[31].m1)) -#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size) - -/* Pstorm queue statistics */ -#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[32].base + ((stat_counter_id) * IRO[32].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size) - -/* Pstorm pf statistics */ -#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[33].base + ((pf_id) * IRO[33].m1)) -#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size) - -/* Control frame's EthType configuration for TX control frame security */ -#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \ - (IRO[34].base + ((eth_type_id) * IRO[34].m1)) -#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size) - -/* Tstorm last parser message */ -#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size) - -/* Tstorm Eth limit Rx rate */ -#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ - (IRO[36].base + ((pf_id) * IRO[36].m1)) -#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size) - -/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. 
- * Use eth_tstorm_rss_update_data for update - */ -#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) -#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size) - -/* Xstorm queue zone */ -#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[38].base + ((queue_id) * IRO[38].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size) - -/* Ystorm cqe producer */ -#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[39].base + ((rss_id) * IRO[39].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size) - -/* Ustorm cqe producer */ -#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[40].base + ((rss_id) * IRO[40].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size) - -/* Ustorm grq producer */ -#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size) - -/* Tstorm cmdq-cons of given command queue-id */ -#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size) - -/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, - * BDqueue-id +#define PCICFG_OFFSET 0x2000 +#define GRC_CONFIG_REG_PF_INIT_VF 0x624 + +/* First VF_NUM for PF is encoded in this register. + * The number of VFs assigned to a PF is assumed to be a multiple of 8. + * Software should program these bits based on Total Number of VFs programmed + * for each PF. + * Since registers from 0x000-0x7ff are split across functions, each PF will + * have the same location for the same 4 bits */ -#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \ - (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \ - ((bdq_id) * IRO[43].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size) - -/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ -#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \ - (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \ - ((bdq_id) * IRO[44].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size) - -/* Tstorm iSCSI RX stats */ -#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ - (IRO[45].base + ((storage_func_id) * IRO[45].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size) - -/* Mstorm iSCSI RX stats */ -#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ - (IRO[46].base + ((storage_func_id) * IRO[46].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size) - -/* Ustorm iSCSI RX stats */ -#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ - (IRO[47].base + ((storage_func_id) * IRO[47].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size) - -/* Xstorm iSCSI TX stats */ -#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ - (IRO[48].base + ((storage_func_id) * IRO[48].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size) - -/* Ystorm iSCSI TX stats */ -#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ - (IRO[49].base + ((storage_func_id) * IRO[49].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size) - -/* Pstorm iSCSI TX stats */ -#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ - (IRO[50].base + ((storage_func_id) * IRO[50].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size) - -/* Tstorm FCoE RX stats */ -#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[51].base + ((pf_id) * IRO[51].m1)) -#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size) - -/* Pstorm FCoE TX stats */ -#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[52].base + ((pf_id) * IRO[52].m1)) -#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size) - -/* 
Pstorm RDMA queue statistics */ -#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1)) -#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size) - -/* Tstorm RDMA queue statistics */ -#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1)) -#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size) - -/* Xstorm error level for assert */ -#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[55].base + ((pf_id) * IRO[55].m1)) -#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size) - -/* Ystorm error level for assert */ -#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[56].base + ((pf_id) * IRO[56].m1)) -#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size) - -/* Pstorm error level for assert */ -#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[57].base + ((pf_id) * IRO[57].m1)) -#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size) - -/* Tstorm error level for assert */ -#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[58].base + ((pf_id) * IRO[58].m1)) -#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size) - -/* Mstorm error level for assert */ -#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[59].base + ((pf_id) * IRO[59].m1)) -#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size) - -/* Ustorm error level for assert */ -#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[60].base + ((pf_id) * IRO[60].m1)) -#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size) - -/* Xstorm iWARP rxmit stats */ -#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[61].base + ((pf_id) * IRO[61].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size) - -/* Tstorm RoCE Event Statistics */ -#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[62].base + ((roce_pf_id) * IRO[62].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size) - -/* DCQCN Received Statistics */ -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id)\ - (IRO[63].base + ((roce_pf_id) * IRO[63].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size) - -/* RoCE Error Statistics */ -#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ - (IRO[64].base + ((roce_pf_id) * IRO[64].m1)) -#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size) - -/* DCQCN Sent Statistics */ -#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[65].base + ((roce_pf_id) * IRO[65].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size) - -/* RoCE CQEs Statistics */ -#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ - (IRO[66].base + ((roce_pf_id) * IRO[66].m1)) -#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size) +#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff /* Runtime array offsets */ #define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 @@ -4721,116 +3114,118 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, #define QM_REG_TXPQMAP_RT_SIZE 512 #define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32068 +#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068 +#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32580 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 32580 +#define QM_REG_WFQVPMAP_RT_OFFSET 33092 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_PTRTBLTX_RT_OFFSET 33092 +#define QM_REG_PTRTBLTX_RT_OFFSET 33604 #define QM_REG_PTRTBLTX_RT_SIZE 1024 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628 #define 
QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276 -#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900 #define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421 -#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471 - -#define RUNTIME_ARRAY_SIZE 34472 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920 
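For context (hedged; assumes the STORE_RT_REG() helper from qed_init_ops.h, which is not shown in this hunk): the *_RT_OFFSET values in this table are indices into the init-time runtime array rather than BAR0 addresses, and a value staged at one of these indices is written to the corresponding register when the init tool runs, e.g.:

static void example_stage_rt_value(struct qed_hwfn *p_hwfn)
{
	/* Hypothetical value, purely for illustration. */
	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, 0);
}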
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34949 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34950 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34951 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34952 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34953 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34954 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34955 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34956 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34957 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34958 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34959 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34960 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34961 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34962 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34963 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34964 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34965 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34966 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34967 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34968 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34969 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34970 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34971 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34972 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34973 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34974 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34975 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34976 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34977 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34978 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34979 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34980 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34981 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34982 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 
34983 + +#define RUNTIME_ARRAY_SIZE 34984 /* Init Callbacks */ #define DMAE_READY_CB 0 @@ -4850,216 +3245,216 @@ struct xstorm_eth_conn_st_ctx { __le32 reserved[60]; }; -struct e4_xstorm_eth_conn_ag_ctx { +struct xstorm_eth_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define 
XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; __le16 e5_reserved1; @@ -5118,37 +3513,37 @@ struct ystorm_eth_conn_st_ctx { __le32 reserved[8]; }; -struct e4_ystorm_eth_conn_ag_ctx { +struct ystorm_eth_conn_ag_ctx { u8 byte0; u8 state; u8 flags0; -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define 
YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 tx_q0_int_coallecing_timeset; u8 byte3; __le16 word0; @@ -5162,89 +3557,89 @@ struct e4_ystorm_eth_conn_ag_ctx { __le32 reg3; }; -struct e4_tstorm_eth_conn_ag_ctx { +struct tstorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define 
TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 -#define 
E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -5266,63 +3661,63 @@ struct e4_tstorm_eth_conn_ag_ctx { __le32 reg10; }; -struct e4_ustorm_eth_conn_ag_ctx { +struct ustorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define 
USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 u8 flags2; -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define 
E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -5346,16 +3741,16 @@ struct mstorm_eth_conn_st_ctx { }; /* eth connection context */ -struct e4_eth_conn_context { +struct eth_conn_context { struct tstorm_eth_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct pstorm_eth_conn_st_ctx pstorm_st_context; struct xstorm_eth_conn_st_ctx xstorm_st_context; - struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context; + struct xstorm_eth_conn_ag_ctx xstorm_ag_context; + struct tstorm_eth_conn_ag_ctx tstorm_ag_context; struct ystorm_eth_conn_st_ctx ystorm_st_context; - struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context; - struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context; + struct ystorm_eth_conn_ag_ctx ystorm_ag_context; + struct ustorm_eth_conn_ag_ctx ustorm_ag_context; struct ustorm_eth_conn_st_ctx ustorm_st_context; struct mstorm_eth_conn_st_ctx mstorm_st_context; }; @@ -5512,7 +3907,7 @@ enum eth_ramrod_cmd_id { ETH_RAMROD_RX_ADD_UDP_FILTER, ETH_RAMROD_RX_DELETE_UDP_FILTER, ETH_RAMROD_RX_CREATE_GFT_ACTION, - ETH_RAMROD_GFT_UPDATE_FILTER, + ETH_RAMROD_RX_UPDATE_GFT_FILTER, ETH_RAMROD_TX_QUEUE_UPDATE, ETH_RAMROD_RGFS_FILTER_ADD, ETH_RAMROD_RGFS_FILTER_DEL, @@ -5596,10 +3991,12 @@ struct eth_vport_rss_config { u8 update_rss_ind_table; u8 update_rss_capabilities; u8 tbl_size; - __le32 reserved2[2]; + u8 ind_table_mask_valid; + u8 reserved2[3]; __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; + __le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS]; __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; - __le32 reserved3[2]; + __le32 reserved3; }; /* eth vport RSS mode */ @@ -5674,8 +4071,20 @@ enum gft_filter_update_action { MAX_GFT_FILTER_UPDATE_ACTION }; +/* Ramrod data for rx create gft action */ +struct rx_create_gft_action_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + +/* Ramrod data for rx create openflow action */ +struct rx_create_openflow_action_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + /* Ramrod data for rx add openflow filter */ -struct rx_add_openflow_filter_data { +struct rx_openflow_filter_ramrod_data { __le16 action_icid; u8 priority; u8 reserved0; @@ -5698,18 +4107,6 @@ struct rx_add_openflow_filter_data { __le16 l4_src_port; }; -/* Ramrod data for rx create gft action */ -struct rx_create_gft_action_data { - u8 vport_id; - u8 reserved[7]; -}; - -/* Ramrod data for rx create openflow action */ -struct rx_create_openflow_action_data { - u8 vport_id; - u8 reserved[7]; -}; - /* Ramrod data for rx queue start ramrod */ struct rx_queue_start_ramrod_data { __le16 rx_queue_id; @@ -5768,7 +4165,7 @@ struct 
rx_queue_update_ramrod_data { }; /* Ramrod data for rx Add UDP Filter */ -struct rx_udp_filter_data { +struct rx_udp_filter_ramrod_data { __le16 action_icid; __le16 vlan_id; u8 ip_type; @@ -5784,7 +4181,7 @@ struct rx_udp_filter_data { /* Add or delete GFT filter - filter is packet header of type of packet wished * to pass certain FW flow. */ -struct rx_update_gft_filter_data { +struct rx_update_gft_filter_ramrod_data { struct regpair pkt_hdr_addr; __le16 pkt_hdr_length; __le16 action_icid; @@ -5824,7 +4221,8 @@ struct tx_queue_start_ramrod_data { u8 pxp_tph_valid_bd; u8 pxp_tph_valid_pkt; __le16 pxp_st_index; - __le16 comp_agg_size; + u8 comp_agg_size; + u8 reserved3; __le16 queue_zone_id; __le16 reserved2; __le16 pbl_size; @@ -5945,7 +4343,12 @@ struct vport_update_ramrod_data_cmn { u8 ctl_frame_ethtype_check_en; u8 update_in_to_in_pri_map_mode; u8 in_to_in_pri_map[8]; - u8 reserved[6]; + u8 update_tx_dst_port_mode_flg; + u8 tx_dst_port_mode_config; + u8 dst_vport_id; + u8 tx_dst_port_mode; + u8 dst_vport_id_valid; + u8 reserved[1]; }; struct vport_update_ramrod_mcast { @@ -5964,7 +4367,7 @@ struct vport_update_ramrod_data { struct eth_vport_rss_config rss_config; }; -struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart { +struct xstorm_eth_conn_ag_ctx_dq_ext_ldpart { u8 reserved0; u8 state; u8 flags0; @@ -6193,253 +4596,253 @@ struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart { __le32 reg4; }; -struct e4_mstorm_eth_conn_ag_ctx { +struct mstorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define 
MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_xstorm_eth_hw_conn_ag_ctx { +struct xstorm_eth_hw_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 -#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; __le16 e5_reserved1; @@ -6479,7 +4882,6 @@ struct gft_cam_line_mapped { #define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 }; - /* Used in gft_profile_key: Indication for ip version */ enum gft_profile_ip_version { GFT_PROFILE_IPV4 = 0, @@ -6640,49 +5042,49 @@ struct ystorm_rdma_task_st_ctx { struct regpair temp[4]; }; -struct e4_ystorm_rdma_task_ag_ctx { +struct ystorm_rdma_task_ag_ctx { u8 reserved; u8 byte1; __le16 msem_ctx_upd_seq; u8 flags0; -#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 -#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 u8 flags1; -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 +#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; @@ -6696,49 +5098,49 @@ struct e4_ystorm_rdma_task_ag_ctx { __le32 fbo_hi; }; -struct e4_mstorm_rdma_task_ag_ctx { +struct mstorm_rdma_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; -#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 u8 flags1; -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 +#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; @@ -6762,56 +5164,56 @@ struct ustorm_rdma_task_st_ctx { struct regpair temp[6]; }; -struct e4_ustorm_rdma_task_ag_ctx { +struct ustorm_rdma_task_ag_ctx { u8 reserved; u8 state; __le16 icid; u8 flags0; -#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 u8 flags1; -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 +#define 
USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 u8 flags3; -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 dif_rxmit_cons; @@ -6828,16 +5230,853 @@ struct e4_ustorm_rdma_task_ag_ctx { }; /* RDMA task context */ -struct e4_rdma_task_context { +struct rdma_task_context { struct ystorm_rdma_task_st_ctx ystorm_st_context; - struct 
e4_ystorm_rdma_task_ag_ctx ystorm_ag_context; + struct ystorm_rdma_task_ag_ctx ystorm_ag_context; struct tdif_task_context tdif_context; - struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; + struct mstorm_rdma_task_ag_ctx mstorm_ag_context; struct mstorm_rdma_task_st_ctx mstorm_st_context; struct rdif_task_context rdif_context; struct ustorm_rdma_task_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; - struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; + struct ustorm_rdma_task_ag_ctx ustorm_ag_context; +}; + +#define TOE_MAX_RAMROD_PER_PF 8 +#define TOE_TX_PAGE_SIZE_BYTES 4096 +#define TOE_GRQ_PAGE_SIZE_BYTES 4096 +#define TOE_RX_CQ_PAGE_SIZE_BYTES 4096 + +#define TOE_RX_MAX_RSS_CHAINS 64 +#define TOE_TX_MAX_TSS_CHAINS 64 +#define TOE_RSS_INDIRECTION_TABLE_SIZE 128 + +/* The toe storm context of Mstorm */ +struct mstorm_toe_conn_st_ctx { + __le32 reserved[24]; +}; + +/* The toe storm context of Pstorm */ +struct pstorm_toe_conn_st_ctx { + __le32 reserved[36]; +}; + +/* The toe storm context of Ystorm */ +struct ystorm_toe_conn_st_ctx { + __le32 reserved[8]; +}; + +/* The toe storm context of Xstorm */ +struct xstorm_toe_conn_st_ctx { + __le32 reserved[44]; +}; + +struct ystorm_toe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define YSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_SHIFT 4 +#define YSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 0 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_SHIFT 1 +#define YSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_SHIFT 3 +#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_SHIFT 7 + u8 completion_opcode; + u8 byte3; + __le16 word0; + __le32 rel_seq; + __le32 rel_seq_threshold; + __le16 app_prod; + __le16 app_cons; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct xstorm_toe_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define 
XSTORM_TOE_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_TOE_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT15_SHIFT 7 + u8 flags2; +#define XSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 + u8 flags3; +#define XSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_TOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_TOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 + u8 flags7; +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define 
XSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_TOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 + u8 flags11; +#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_MASK 
0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_TOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 physical_q1; + __le16 word2; + __le16 word3; + __le16 bd_prod; + __le16 word5; + __le16 word6; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 more_to_send_seq; + __le32 local_adv_wnd_seq; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; + __le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 e5_reserved; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; +}; + +struct tstorm_toe_conn_ag_ctx { + u8 reserved0; + u8 byte1; + u8 flags0; +#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_SHIFT 6 + u8 flags1; +#define TSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define 
TSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 + u8 flags4; +#define TSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; +}; + +struct ustorm_toe_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_SHIFT 6 + u8 flags1; +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3 +#define 
USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 3 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 5 +#define USTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +/* The toe storm context of Tstorm */ +struct tstorm_toe_conn_st_ctx { + __le32 reserved[16]; +}; + +/* The toe storm context of Ustorm */ +struct ustorm_toe_conn_st_ctx { + __le32 reserved[52]; +}; + +/* toe connection context */ +struct toe_conn_context { + struct ystorm_toe_conn_st_ctx ystorm_st_context; + struct pstorm_toe_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_toe_conn_st_ctx xstorm_st_context; + struct regpair xstorm_st_padding[2]; + struct ystorm_toe_conn_ag_ctx ystorm_ag_context; + struct xstorm_toe_conn_ag_ctx xstorm_ag_context; + struct tstorm_toe_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_toe_conn_ag_ctx ustorm_ag_context; + struct tstorm_toe_conn_st_ctx tstorm_st_context; + struct mstorm_toe_conn_st_ctx mstorm_st_context; + struct ustorm_toe_conn_st_ctx ustorm_st_context; +}; + +/* toe init ramrod header */ +struct toe_init_ramrod_header { + u8 first_rss; + u8 num_rss; + u8 reserved[6]; +}; + +/* toe pf init parameters */ +struct toe_pf_init_params { + __le32 push_timeout; + __le16 grq_buffer_size; + __le16 grq_sb_id; + u8 grq_sb_index; + u8 max_seg_retransmit; + u8 doubt_reachability; + u8 ll2_rx_queue_id; + __le16 grq_fetch_threshold; + u8 reserved1[2]; + struct regpair grq_page_addr; +}; + +/* toe tss parameters */ +struct toe_tss_params { + struct regpair curr_page_addr; + struct regpair next_page_addr; + u8 reserved0; + u8 status_block_index; + __le16 status_block_id; + __le16 
reserved1[2]; +}; + +/* toe rss parameters */ +struct toe_rss_params { + struct regpair curr_page_addr; + struct regpair next_page_addr; + u8 reserved0; + u8 status_block_index; + __le16 status_block_id; + __le16 reserved1[2]; +}; + +/* toe init ramrod data */ +struct toe_init_ramrod_data { + struct toe_init_ramrod_header hdr; + struct tcp_init_params tcp_params; + struct toe_pf_init_params pf_params; + struct toe_tss_params tss_params[TOE_TX_MAX_TSS_CHAINS]; + struct toe_rss_params rss_params[TOE_RX_MAX_RSS_CHAINS]; +}; + +/* toe offload parameters */ +struct toe_offload_params { + struct regpair tx_bd_page_addr; + struct regpair tx_app_page_addr; + __le32 more_to_send_seq; + __le16 rcv_indication_size; + u8 rss_tss_id; + u8 ignore_grq_push; + struct regpair rx_db_data_ptr; +}; + +/* TOE offload ramrod data - DMAed by firmware */ +struct toe_offload_ramrod_data { + struct tcp_offload_params tcp_ofld_params; + struct toe_offload_params toe_ofld_params; +}; + +/* TOE ramrod command IDs */ +enum toe_ramrod_cmd_id { + TOE_RAMROD_UNUSED, + TOE_RAMROD_FUNC_INIT, + TOE_RAMROD_INITATE_OFFLOAD, + TOE_RAMROD_FUNC_CLOSE, + TOE_RAMROD_SEARCHER_DELETE, + TOE_RAMROD_TERMINATE, + TOE_RAMROD_QUERY, + TOE_RAMROD_UPDATE, + TOE_RAMROD_EMPTY, + TOE_RAMROD_RESET_SEND, + TOE_RAMROD_INVALIDATE, + MAX_TOE_RAMROD_CMD_ID +}; + +/* Toe RQ buffer descriptor */ +struct toe_rx_bd { + struct regpair addr; + __le16 size; + __le16 flags; +#define TOE_RX_BD_START_MASK 0x1 +#define TOE_RX_BD_START_SHIFT 0 +#define TOE_RX_BD_END_MASK 0x1 +#define TOE_RX_BD_END_SHIFT 1 +#define TOE_RX_BD_NO_PUSH_MASK 0x1 +#define TOE_RX_BD_NO_PUSH_SHIFT 2 +#define TOE_RX_BD_SPLIT_MASK 0x1 +#define TOE_RX_BD_SPLIT_SHIFT 3 +#define TOE_RX_BD_RESERVED0_MASK 0xFFF +#define TOE_RX_BD_RESERVED0_SHIFT 4 + __le32 reserved1; +}; + +/* TOE RX completion queue opcodes (opcode 0 is illegal) */ +enum toe_rx_cmp_opcode { + TOE_RX_CMP_OPCODE_GA = 1, + TOE_RX_CMP_OPCODE_GR = 2, + TOE_RX_CMP_OPCODE_GNI = 3, + TOE_RX_CMP_OPCODE_GAIR = 4, + TOE_RX_CMP_OPCODE_GAIL = 5, + TOE_RX_CMP_OPCODE_GRI = 6, + TOE_RX_CMP_OPCODE_GJ = 7, + TOE_RX_CMP_OPCODE_DGI = 8, + TOE_RX_CMP_OPCODE_CMP = 9, + TOE_RX_CMP_OPCODE_REL = 10, + TOE_RX_CMP_OPCODE_SKP = 11, + TOE_RX_CMP_OPCODE_URG = 12, + TOE_RX_CMP_OPCODE_RT_TO = 13, + TOE_RX_CMP_OPCODE_KA_TO = 14, + TOE_RX_CMP_OPCODE_MAX_RT = 15, + TOE_RX_CMP_OPCODE_DBT_RE = 16, + TOE_RX_CMP_OPCODE_SYN = 17, + TOE_RX_CMP_OPCODE_OPT_ERR = 18, + TOE_RX_CMP_OPCODE_FW2_TO = 19, + TOE_RX_CMP_OPCODE_2WY_CLS = 20, + TOE_RX_CMP_OPCODE_RST_RCV = 21, + TOE_RX_CMP_OPCODE_FIN_RCV = 22, + TOE_RX_CMP_OPCODE_FIN_UPL = 23, + TOE_RX_CMP_OPCODE_INIT = 32, + TOE_RX_CMP_OPCODE_RSS_UPDATE = 33, + TOE_RX_CMP_OPCODE_CLOSE = 34, + TOE_RX_CMP_OPCODE_INITIATE_OFFLOAD = 80, + TOE_RX_CMP_OPCODE_SEARCHER_DELETE = 81, + TOE_RX_CMP_OPCODE_TERMINATE = 82, + TOE_RX_CMP_OPCODE_QUERY = 83, + TOE_RX_CMP_OPCODE_RESET_SEND = 84, + TOE_RX_CMP_OPCODE_INVALIDATE = 85, + TOE_RX_CMP_OPCODE_EMPTY = 86, + TOE_RX_CMP_OPCODE_UPDATE = 87, + MAX_TOE_RX_CMP_OPCODE +}; + +/* TOE rx ooo completion data */ +struct toe_rx_cqe_ooo_params { + __le32 nbytes; + __le16 grq_buff_id; + u8 isle_num; + u8 reserved0; +}; + +/* TOE rx in order completion data */ +struct toe_rx_cqe_in_order_params { + __le32 nbytes; + __le16 grq_buff_id; + __le16 reserved1; +}; + +/* Union for TOE rx completion data */ +union toe_rx_cqe_data_union { + struct toe_rx_cqe_ooo_params ooo_params; + struct toe_rx_cqe_in_order_params in_order_params; + struct regpair raw_data; +}; + +/* TOE rx completion element */ +struct 
toe_rx_cqe { + __le16 icid; + u8 completion_opcode; + u8 reserved0; + __le32 reserved1; + union toe_rx_cqe_data_union data; +}; + +/* toe RX doorbel data */ +struct toe_rx_db_data { + __le32 local_adv_wnd_seq; + __le32 reserved[3]; +}; + +/* Toe GRQ buffer descriptor */ +struct toe_rx_grq_bd { + struct regpair addr; + __le16 buff_id; + __le16 reserved0; + __le32 reserved1; +}; + +/* Toe transmission application buffer descriptor */ +struct toe_tx_app_buff_desc { + __le32 next_buffer_start_seq; + __le32 reserved; +}; + +/* Toe transmission application buffer descriptor page pointer */ +struct toe_tx_app_buff_page_pointer { + struct regpair next_page_addr; +}; + +/* Toe transmission buffer descriptor */ +struct toe_tx_bd { + struct regpair addr; + __le16 size; + __le16 flags; +#define TOE_TX_BD_PUSH_MASK 0x1 +#define TOE_TX_BD_PUSH_SHIFT 0 +#define TOE_TX_BD_NOTIFY_MASK 0x1 +#define TOE_TX_BD_NOTIFY_SHIFT 1 +#define TOE_TX_BD_LARGE_IO_MASK 0x1 +#define TOE_TX_BD_LARGE_IO_SHIFT 2 +#define TOE_TX_BD_BD_CONS_MASK 0x1FFF +#define TOE_TX_BD_BD_CONS_SHIFT 3 + __le32 next_bd_start_seq; +}; + +/* TOE completion opcodes */ +enum toe_tx_cmp_opcode { + TOE_TX_CMP_OPCODE_DATA, + TOE_TX_CMP_OPCODE_TERMINATE, + TOE_TX_CMP_OPCODE_EMPTY, + TOE_TX_CMP_OPCODE_RESET_SEND, + TOE_TX_CMP_OPCODE_INVALIDATE, + TOE_TX_CMP_OPCODE_RST_RCV, + MAX_TOE_TX_CMP_OPCODE +}; + +/* Toe transmission completion element */ +struct toe_tx_cqe { + __le16 icid; + u8 opcode; + u8 reserved; + __le32 size; +}; + +/* Toe transmission page pointer bd */ +struct toe_tx_page_pointer_bd { + struct regpair next_page_addr; + struct regpair prev_page_addr; +}; + +/* Toe transmission completion element page pointer */ +struct toe_tx_page_pointer_cqe { + struct regpair next_page_addr; +}; + +/* toe update parameters */ +struct toe_update_params { + __le16 flags; +#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_MASK 0x1 +#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_SHIFT 0 +#define TOE_UPDATE_PARAMS_RESERVED_MASK 0x7FFF +#define TOE_UPDATE_PARAMS_RESERVED_SHIFT 1 + __le16 rcv_indication_size; + __le16 reserved1[2]; +}; + +/* TOE update ramrod data - DMAed by firmware */ +struct toe_update_ramrod_data { + struct tcp_update_params tcp_upd_params; + struct toe_update_params toe_upd_params; +}; + +struct mstorm_toe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define 
MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +/* TOE doorbell data */ +struct toe_db_data { + u8 params; +#define TOE_DB_DATA_DEST_MASK 0x3 +#define TOE_DB_DATA_DEST_SHIFT 0 +#define TOE_DB_DATA_AGG_CMD_MASK 0x3 +#define TOE_DB_DATA_AGG_CMD_SHIFT 2 +#define TOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define TOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define TOE_DB_DATA_RESERVED_MASK 0x1 +#define TOE_DB_DATA_RESERVED_SHIFT 5 +#define TOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define TOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 bd_prod; }; /* rdma function init ramrod data */ @@ -6911,6 +6150,8 @@ enum rdma_event_opcode { RDMA_EVENT_CREATE_SRQ, RDMA_EVENT_MODIFY_SRQ, RDMA_EVENT_DESTROY_SRQ, + RDMA_EVENT_START_NAMESPACE_TRACKING, + RDMA_EVENT_STOP_NAMESPACE_TRACKING, MAX_RDMA_EVENT_OPCODE }; @@ -6935,18 +6176,33 @@ struct rdma_init_func_hdr { u8 relaxed_ordering; __le16 first_reg_srq_id; __le32 reg_srq_base_addr; - u8 searcher_mode; - u8 pvrdma_mode; + u8 flags; +#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_SHIFT 0 +#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_SHIFT 1 +#define RDMA_INIT_FUNC_HDR_DPT_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_DPT_MODE_SHIFT 2 +#define RDMA_INIT_FUNC_HDR_RESERVED0_MASK 0x1F +#define RDMA_INIT_FUNC_HDR_RESERVED0_SHIFT 3 + u8 dpt_byte_threshold_log; + u8 dpt_common_queue_id; u8 max_num_ns_log; - u8 reserved; }; /* rdma function init ramrod data */ struct rdma_init_func_ramrod_data { struct rdma_init_func_hdr params_header; + struct rdma_cnq_params dptq_params; struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES]; }; +/* rdma namespace tracking ramrod data */ +struct rdma_namespace_tracking_ramrod_data { + u8 name_space; + u8 reserved[7]; +}; + /* RDMA ramrod command IDs */ enum rdma_ramrod_cmd_id { RDMA_RAMROD_UNUSED, @@ -6960,6 +6216,8 @@ enum rdma_ramrod_cmd_id { RDMA_RAMROD_CREATE_SRQ, RDMA_RAMROD_MODIFY_SRQ, RDMA_RAMROD_DESTROY_SRQ, + RDMA_RAMROD_START_NS_TRACKING, + RDMA_RAMROD_STOP_NS_TRACKING, MAX_RDMA_RAMROD_CMD_ID }; @@ -7093,73 +6351,73 @@ struct rdma_xrc_srq_context { struct regpair temp[9]; }; -struct e4_tstorm_rdma_task_ag_ctx { +struct tstorm_rdma_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 +#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define 
E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; @@ -7172,63 +6430,63 @@ struct e4_tstorm_rdma_task_ag_ctx { __le32 reg2; }; -struct e4_ustorm_rdma_conn_ag_ctx { +struct ustorm_rdma_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1 -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define 
E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 u8 flags3; -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 nvmf_only; __le16 conn_dpi; @@ -7241,214 +6499,214 @@ struct e4_ustorm_rdma_conn_ag_ctx { __le16 word3; }; -struct e4_xstorm_roce_conn_ag_ctx { +struct xstorm_roce_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 -#define 
E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define 
E4_XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define 
E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1 
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 
flags12; -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1 -#define 
E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -7470,89 +6728,89 @@ struct e4_xstorm_roce_conn_ag_ctx { __le32 reg6; }; -struct e4_tstorm_roce_conn_ag_ctx { +struct tstorm_roce_conn_ag_ctx { u8 reserved0; u8 byte1; u8 flags0; -#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define 
TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define 
TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -7605,15 +6863,15 @@ struct ustorm_roce_conn_st_ctx { }; /* roce connection context */ -struct e4_roce_conn_context { +struct roce_conn_context { struct ystorm_roce_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_roce_conn_st_ctx pstorm_st_context; struct xstorm_roce_conn_st_ctx xstorm_st_context; - struct e4_xstorm_roce_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_roce_conn_ag_ctx tstorm_ag_context; + struct xstorm_roce_conn_ag_ctx xstorm_ag_context; + struct tstorm_roce_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; - struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_roce_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct mstorm_roce_conn_st_ctx mstorm_st_context; @@ -7681,8 +6939,10 @@ struct roce_create_qp_req_ramrod_data { #define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1F +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 3 u8 name_space; u8 reserved3[3]; __le16 regular_latency_phy_queue; @@ -7714,8 +6974,10 @@ struct roce_create_qp_resp_ramrod_data { #define 
ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 18 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x1FFF +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 19 __le16 xrc_domain; u8 max_ird; u8 traffic_class; @@ -7752,10 +7014,85 @@ struct roce_create_qp_resp_ramrod_data { u8 reserved3[3]; }; +/* RoCE Create Suspended qp requester runtime ramrod data */ +struct roce_create_suspended_qp_req_runtime_ramrod_data { + __le32 flags; +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_SHIFT 0 +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_MASK \ + 0x7FFFFFFF +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_SHIFT 1 + __le32 send_msg_psn; + __le32 inflight_sends; + __le32 ssn; +}; + +/* RoCE Create Suspended QP requester ramrod data */ +struct roce_create_suspended_qp_req_ramrod_data { + struct roce_create_qp_req_ramrod_data qp_params; + struct roce_create_suspended_qp_req_runtime_ramrod_data + qp_runtime_params; +}; + +/* RoCE Create Suspended QP responder runtime params */ +struct roce_create_suspended_qp_resp_runtime_params { + __le32 flags; +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2 + __le32 receive_msg_psn; + __le32 inflight_receives; + __le32 rmsn; + __le32 rdma_key; + struct regpair rdma_va; + __le32 rdma_length; + __le32 num_rdb_entries; + __le32 resreved; +}; + +/* RoCE RDB array entry */ +struct roce_resp_qp_rdb_entry { + struct regpair atomic_data; + struct regpair va; + __le32 psn; + __le32 rkey; + __le32 byte_count; + u8 op_type; + u8 reserved[3]; +}; + +/* RoCE Create Suspended QP responder runtime ramrod data */ +struct roce_create_suspended_qp_resp_runtime_ramrod_data { + struct roce_create_suspended_qp_resp_runtime_params params; + struct roce_resp_qp_rdb_entry + rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE]; +}; + +/* RoCE Create Suspended QP responder ramrod data */ +struct roce_create_suspended_qp_resp_ramrod_data { + struct roce_create_qp_resp_ramrod_data + qp_params; + struct roce_create_suspended_qp_resp_runtime_ramrod_data + qp_runtime_params; +}; + +/* RoCE create ud qp ramrod data */ +struct roce_create_ud_qp_ramrod_data { + __le16 local_mac_addr[3]; + __le16 vlan_id; + __le32 src_qp_id; + u8 name_space; + u8 reserved[3]; +}; + /* roce DCQCN received statistics */ struct roce_dcqcn_received_stats { struct regpair ecn_pkt_rcv; struct regpair cnp_pkt_rcv; + struct regpair cnp_pkt_reject; }; /* roce DCQCN sent statistics */ @@ -7787,6 +7124,12 @@ struct roce_destroy_qp_resp_ramrod_data { __le32 reserved; }; +/* RoCE destroy ud qp ramrod data */ +struct roce_destroy_ud_qp_ramrod_data { + __le32 src_qp_id; + __le32 reserved; +}; + /* roce error statistics */ struct roce_error_stats { __le32 
resp_remote_access_errors; @@ -7809,13 +7152,21 @@ struct roce_events_stats { /* roce slow path EQ cmd IDs */ enum roce_event_opcode { - ROCE_EVENT_CREATE_QP = 11, + ROCE_EVENT_CREATE_QP = 13, ROCE_EVENT_MODIFY_QP, ROCE_EVENT_QUERY_QP, ROCE_EVENT_DESTROY_QP, ROCE_EVENT_CREATE_UD_QP, ROCE_EVENT_DESTROY_UD_QP, ROCE_EVENT_FUNC_UPDATE, + ROCE_EVENT_SUSPEND_QP, + ROCE_EVENT_QUERY_SUSPENDED_QP, + ROCE_EVENT_CREATE_SUSPENDED_QP, + ROCE_EVENT_RESUME_QP, + ROCE_EVENT_SUSPEND_UD_QP, + ROCE_EVENT_RESUME_UD_QP, + ROCE_EVENT_CREATE_SUSPENDED_UD_QP, + ROCE_EVENT_FLUSH_DPT_QP, MAX_ROCE_EVENT_OPCODE }; @@ -7843,6 +7194,18 @@ struct roce_init_func_ramrod_data { struct roce_init_func_params roce; }; +/* roce_ll2_cqe_data */ +struct roce_ll2_cqe_data { + u8 name_space; + u8 flags; +#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_MASK 0x1 +#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_SHIFT 0 +#define ROCE_LL2_CQE_DATA_RESERVED0_MASK 0x7F +#define ROCE_LL2_CQE_DATA_RESERVED0_SHIFT 1 + u8 reserved1[2]; + __le32 cid; +}; + /* roce modify qp requester ramrod data */ struct roce_modify_qp_req_ramrod_data { __le16 flags; @@ -7870,8 +7233,10 @@ struct roce_modify_qp_req_ramrod_data { #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 14 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 15 u8 fields; #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0 @@ -7917,8 +7282,10 @@ struct roce_modify_qp_resp_ramrod_data { #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 11 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0xF +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 12 u8 fields; #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0 @@ -7969,18 +7336,84 @@ struct roce_query_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* RoCE Query Suspended QP requester output params */ +struct roce_query_suspended_qp_req_output_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 1 + __le32 send_msg_psn; + __le32 inflight_sends; + __le32 ssn; + __le32 reserved; +}; + +/* RoCE Query Suspended QP requester ramrod data */ +struct roce_query_suspended_qp_req_ramrod_data { + struct regpair output_params_addr; +}; + +/* RoCE Query Suspended QP responder runtime params */ +struct roce_query_suspended_qp_resp_runtime_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1 +#define 
ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2 + __le32 receive_msg_psn; + __le32 inflight_receives; + __le32 rmsn; + __le32 rdma_key; + struct regpair rdma_va; + __le32 rdma_length; + __le32 num_rdb_entries; +}; + +/* RoCE Query Suspended QP responder output params */ +struct roce_query_suspended_qp_resp_output_params { + struct roce_query_suspended_qp_resp_runtime_params runtime_params; + struct roce_resp_qp_rdb_entry + rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE]; +}; + +/* RoCE Query Suspended QP responder ramrod data */ +struct roce_query_suspended_qp_resp_ramrod_data { + struct regpair output_params_addr; +}; + /* ROCE ramrod command IDs */ enum roce_ramrod_cmd_id { - ROCE_RAMROD_CREATE_QP = 11, + ROCE_RAMROD_CREATE_QP = 13, ROCE_RAMROD_MODIFY_QP, ROCE_RAMROD_QUERY_QP, ROCE_RAMROD_DESTROY_QP, ROCE_RAMROD_CREATE_UD_QP, ROCE_RAMROD_DESTROY_UD_QP, ROCE_RAMROD_FUNC_UPDATE, + ROCE_RAMROD_SUSPEND_QP, + ROCE_RAMROD_QUERY_SUSPENDED_QP, + ROCE_RAMROD_CREATE_SUSPENDED_QP, + ROCE_RAMROD_RESUME_QP, + ROCE_RAMROD_SUSPEND_UD_QP, + ROCE_RAMROD_RESUME_UD_QP, + ROCE_RAMROD_CREATE_SUSPENDED_UD_QP, + ROCE_RAMROD_FLUSH_DPT_QP, MAX_ROCE_RAMROD_CMD_ID }; +/* ROCE RDB array entry type */ +enum roce_resp_qp_rdb_entry_type { + ROCE_QP_RDB_ENTRY_RDMA_RESPONSE = 0, + ROCE_QP_RDB_ENTRY_ATOMIC_RESPONSE = 1, + ROCE_QP_RDB_ENTRY_INVALID = 2, + MAX_ROCE_RESP_QP_RDB_ENTRY_TYPE +}; + /* RoCE func init ramrod data */ struct roce_update_func_params { u8 cnp_vlan_priority; @@ -7995,7 +7428,7 @@ struct roce_update_func_params { __le32 cnp_send_timeout; }; -struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { +struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; u8 state; u8 flags0; @@ -8222,200 +7655,200 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { __le32 reg4; }; -struct e4_mstorm_roce_conn_ag_ctx { +struct mstorm_roce_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define 
E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_mstorm_roce_req_conn_ag_ctx { +struct mstorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_mstorm_roce_resp_conn_ag_ctx { +struct mstorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define 
MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_tstorm_roce_req_conn_ag_ctx { +struct tstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3 +#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 u8 flags3; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 +#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 dif_rxmit_cnt; __le32 snd_nxt_psn; __le32 snd_max_psn; @@ -8437,89 +7870,89 @@ struct e4_tstorm_roce_req_conn_ag_ctx { __le32 reg10; }; -struct e4_tstorm_roce_resp_conn_ag_ctx { +struct tstorm_roce_resp_conn_ag_ctx { u8 byte0; u8 state; u8 flags0; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 psn_and_rxmit_id_echo; __le32 reg1; __le32 reg2; @@ -8541,63 +7974,63 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { __le32 reg10; }; -struct e4_ustorm_roce_req_conn_ag_ctx { +struct ustorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define 
USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8610,63 +8043,63 @@ struct e4_ustorm_roce_req_conn_ag_ctx { __le16 word3; }; -struct e4_ustorm_roce_resp_conn_ag_ctx { +struct ustorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define 
USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define 
E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8679,214 +8112,214 @@ struct e4_ustorm_roce_resp_conn_ag_ctx { __le16 word3; }; -struct e4_xstorm_roce_req_conn_ag_ctx { +struct xstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 -#define 
E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3 -#define 
E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define 
E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -8908,216 +8341,216 @@ struct e4_xstorm_roce_req_conn_ag_ctx { __le32 orq_cons; }; -struct e4_xstorm_roce_resp_conn_ag_ctx { +struct xstorm_roce_resp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 -#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 -#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 -#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 irq_prod_shadow; @@ -9139,37 +8572,37 @@ struct e4_xstorm_roce_resp_conn_ag_ctx { __le32 msn_and_syndrome; }; -struct e4_ystorm_roce_conn_ag_ctx { +struct ystorm_roce_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 -#define 
E4_YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9183,37 +8616,37 @@ struct e4_ystorm_roce_conn_ag_ctx { __le32 reg3; }; -struct e4_ystorm_roce_req_conn_ag_ctx { +struct ystorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define 
E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9227,37 +8660,37 @@ struct e4_ystorm_roce_req_conn_ag_ctx { __le32 reg3; }; -struct e4_ystorm_roce_resp_conn_ag_ctx { +struct ystorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define 
E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9294,216 +8727,216 @@ struct xstorm_iwarp_conn_st_ctx { __le32 reserved[48]; }; -struct e4_xstorm_iwarp_conn_ag_ctx { +struct xstorm_iwarp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 u8 flags2; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 u8 flags3; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 +#define 
XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 flags7; -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -9551,89 +8984,89 @@ struct e4_xstorm_iwarp_conn_ag_ctx { __le32 reg17; }; -struct e4_tstorm_iwarp_conn_ag_ctx { +struct tstorm_iwarp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 unaligned_nxt_seq; @@ -9671,16 +9104,16 @@ struct ustorm_iwarp_conn_st_ctx { }; /* iwarp connection context */ -struct e4_iwarp_conn_context { +struct iwarp_conn_context { struct ystorm_iwarp_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_iwarp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_iwarp_conn_st_ctx xstorm_st_context; - struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context; + struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context; + struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; - struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_iwarp_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct mstorm_iwarp_conn_st_ctx mstorm_st_context; @@ -9731,8 +9164,8 @@ enum iwarp_eqe_async_opcode { IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, - 
IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT, + IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, MAX_IWARP_EQE_ASYNC_OPCODE }; @@ -9750,8 +9183,7 @@ struct iwarp_eqe_data_tcp_async_completion { /* iWARP completion queue types */ enum iwarp_eqe_sync_opcode { - IWARP_EVENT_TYPE_TCP_OFFLOAD = - 11, + IWARP_EVENT_TYPE_TCP_OFFLOAD = 13, IWARP_EVENT_TYPE_MPA_OFFLOAD, IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR, IWARP_EVENT_TYPE_CREATE_QP, @@ -9783,8 +9215,6 @@ enum iwarp_fw_return_code { IWARP_EXCEPTION_DETECTED_LLP_RESET, IWARP_EXCEPTION_DETECTED_IRQ_FULL, IWARP_EXCEPTION_DETECTED_RQ_EMPTY, - IWARP_EXCEPTION_DETECTED_SRQ_EMPTY, - IWARP_EXCEPTION_DETECTED_SRQ_LIMIT, IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT, IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR, IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW, @@ -9878,9 +9308,10 @@ struct iwarp_mpa_offload_ramrod_data { struct regpair async_eqe_output_buf; struct regpair handle_for_async; struct regpair shared_queue_addr; + __le32 additional_setup_time; __le16 rcv_wnd; u8 stats_counter_id; - u8 reserved3[13]; + u8 reserved3[9]; }; /* iWARP TCP connection offload params passed by driver to FW */ @@ -9888,11 +9319,13 @@ struct iwarp_offload_params { struct mpa_ulp_buffer incoming_ulp_buffer; struct regpair async_eqe_output_buf; struct regpair handle_for_async; + __le32 additional_setup_time; __le16 physical_q0; __le16 physical_q1; u8 stats_counter_id; u8 mpa_mode; - u8 reserved[10]; + u8 src_vport_id; + u8 reserved[5]; }; /* iWARP query QP output params */ @@ -9912,7 +9345,7 @@ struct iwarp_query_qp_ramrod_data { /* iWARP Ramrod Command IDs */ enum iwarp_ramrod_cmd_id { - IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11, + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 13, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, IWARP_RAMROD_CMD_ID_CREATE_QP, @@ -9971,100 +9404,100 @@ struct unaligned_opaque_data { __le32 cid; }; -struct e4_mstorm_iwarp_conn_ag_ctx { +struct mstorm_iwarp_conn_ag_ctx { u8 reserved; u8 state; u8 flags0; -#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 rcq_cons; __le16 rcq_cons_th; __le32 reg0; __le32 reg1; }; -struct e4_ustorm_iwarp_conn_ag_ctx { +struct ustorm_iwarp_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define 
E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 u8 flags3; -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10077,37 +9510,37 @@ struct e4_ustorm_iwarp_conn_ag_ctx { __le16 word3; }; -struct e4_ystorm_iwarp_conn_ag_ctx { +struct ystorm_iwarp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10297,216 +9730,216 @@ struct xstorm_fcoe_conn_st_ctx { struct fcoe_wqe cached_wqes[16]; }; -struct e4_xstorm_fcoe_conn_ag_ctx { +struct xstorm_fcoe_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 u8 flags2; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 +#define 
XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define 
XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define 
E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define 
E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 
0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -10544,150 +9977,150 @@ struct ustorm_fcoe_conn_st_ctx { u8 reserved[2]; }; -struct e4_tstorm_fcoe_conn_ag_ctx { +struct tstorm_fcoe_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 u8 flags1; -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define 
TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define 
TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; }; -struct e4_ustorm_fcoe_conn_ag_ctx { +struct ustorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 +#define 
USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10728,37 +10161,37 @@ struct tstorm_fcoe_conn_st_ctx { u8 reserved0[4]; }; -struct e4_mstorm_fcoe_conn_ag_ctx { 
+struct mstorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; @@ -10804,21 +10237,21 @@ struct mstorm_fcoe_conn_st_ctx { }; /* fcoe connection context */ -struct e4_fcoe_conn_context { +struct fcoe_conn_context { struct ystorm_fcoe_conn_st_ctx ystorm_st_context; struct pstorm_fcoe_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_fcoe_conn_st_ctx xstorm_st_context; - struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context; + struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context; struct regpair xstorm_ag_padding[6]; struct ustorm_fcoe_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; - struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context; + struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context; struct regpair tstorm_ag_padding[2]; struct timers_context timer_context; - struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context; + struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context; struct 
tstorm_fcoe_conn_st_ctx tstorm_st_context; - struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context; struct mstorm_fcoe_conn_st_ctx mstorm_st_context; }; @@ -10869,37 +10302,37 @@ struct fcoe_stat_ramrod_params { struct fcoe_stat_ramrod_data stat_ramrod_data; }; -struct e4_ystorm_fcoe_conn_ag_ctx { +struct ystorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10930,216 +10363,216 @@ struct xstorm_iscsi_tcp_conn_st_ctx { __le32 reserved_iscsi[44]; }; -struct e4_xstorm_iscsi_conn_ag_ctx { +struct xstorm_iscsi_conn_ag_ctx { u8 cdu_validation; u8 state; u8 flags0; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 u8 flags2; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 u8 flags3; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 u8 flags6; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 
flags7; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -11187,89 +10620,89 @@ struct e4_xstorm_iscsi_conn_ag_ctx { __le32 reg17; }; -struct e4_tstorm_iscsi_conn_ag_ctx { +struct tstorm_iscsi_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 -#define 
E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define 
TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 rx_tcp_checksum_err_cnt; @@ -11284,63 +10717,63 @@ struct e4_tstorm_iscsi_conn_ag_ctx { __le16 word0; }; -struct e4_ustorm_iscsi_conn_ag_ctx { +struct ustorm_iscsi_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -11358,37 +10791,37 @@ struct tstorm_iscsi_conn_st_ctx { __le32 reserved[44]; }; -struct e4_mstorm_iscsi_conn_ag_ctx { +struct mstorm_iscsi_conn_ag_ctx { u8 reserved; u8 state; u8 flags0; -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; @@ -11407,22 +10840,22 @@ struct ustorm_iscsi_conn_st_ctx { }; /* iscsi connection context */ -struct e4_iscsi_conn_context { +struct iscsi_conn_context { struct ystorm_iscsi_conn_st_ctx ystorm_st_context; struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct pb_context xpb2_context; struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context; struct regpair xstorm_st_padding[2]; - struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context; + struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context; + struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context; struct regpair tstorm_ag_padding[2]; struct timers_context timer_context; - struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context; + struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context; struct pb_context upb_context; struct tstorm_iscsi_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; - struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context; + struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context; struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context; struct ustorm_iscsi_conn_st_ctx ustorm_st_context; }; @@ -11433,37 +10866,37 @@ struct iscsi_init_ramrod_params { struct tcp_init_params tcp_init; }; -struct e4_ystorm_iscsi_conn_ag_ctx { +struct ystorm_iscsi_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; 
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -11477,1922 +10910,4 @@ struct e4_ystorm_iscsi_conn_ag_ctx { __le32 reg3; }; -#define MFW_TRACE_SIGNATURE 0x25071946 - -/* The trace in the buffer */ -#define MFW_TRACE_EVENTID_MASK 0x00ffff -#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000 -#define MFW_TRACE_PRM_SIZE_OFFSET 16 -#define MFW_TRACE_ENTRY_SIZE 3 - -struct mcp_trace { - u32 signature; /* Help to identify that the trace is valid */ - u32 size; /* the size of the trace buffer in bytes */ - u32 curr_level; /* 2 - all will be written to the buffer - * 1 - debug trace will not be written - * 0 - just errors will be written to the buffer - */ - u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means - * mask it. - */ - - /* Warning: the following pointers are assumed to be 32bits as they are - * used only in the MFW. - */ - u32 trace_prod; /* The next trace will be written to this offset */ - u32 trace_oldest; /* The oldest valid trace starts at this offset - * (usually very close after the current producer). - */ -}; - -#define VF_MAX_STATIC 192 - -#define MCP_GLOB_PATH_MAX 2 -#define MCP_PORT_MAX 2 -#define MCP_GLOB_PORT_MAX 4 -#define MCP_GLOB_FUNC_MAX 16 - -typedef u32 offsize_t; /* In DWORDS !!! 
*/ -/* Offset from the beginning of the MCP scratchpad */ -#define OFFSIZE_OFFSET_SHIFT 0 -#define OFFSIZE_OFFSET_MASK 0x0000ffff -/* Size of specific element (not the whole array if any) */ -#define OFFSIZE_SIZE_SHIFT 16 -#define OFFSIZE_SIZE_MASK 0xffff0000 - -#define SECTION_OFFSET(_offsize) ((((_offsize & \ - OFFSIZE_OFFSET_MASK) >> \ - OFFSIZE_OFFSET_SHIFT) << 2)) - -#define QED_SECTION_SIZE(_offsize) (((_offsize & \ - OFFSIZE_SIZE_MASK) >> \ - OFFSIZE_SIZE_SHIFT) << 2) - -#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \ - SECTION_OFFSET(_offsize) + \ - (QED_SECTION_SIZE(_offsize) * idx)) - -#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \ - (_pub_base + offsetof(struct mcp_public_data, sections[_section])) - -/* PHY configuration */ -struct eth_phy_cfg { - u32 speed; -#define ETH_SPEED_AUTONEG 0x0 -#define ETH_SPEED_SMARTLINQ 0x8 - - u32 pause; -#define ETH_PAUSE_NONE 0x0 -#define ETH_PAUSE_AUTONEG 0x1 -#define ETH_PAUSE_RX 0x2 -#define ETH_PAUSE_TX 0x4 - - u32 adv_speed; - - u32 loopback_mode; -#define ETH_LOOPBACK_NONE 0x0 -#define ETH_LOOPBACK_INT_PHY 0x1 -#define ETH_LOOPBACK_EXT_PHY 0x2 -#define ETH_LOOPBACK_EXT 0x3 -#define ETH_LOOPBACK_MAC 0x4 -#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5 -#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6 -#define ETH_LOOPBACK_PCS_AH_ONLY 0x7 -#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8 -#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9 - - u32 eee_cfg; -#define EEE_CFG_EEE_ENABLED BIT(0) -#define EEE_CFG_TX_LPI BIT(1) -#define EEE_CFG_ADV_SPEED_1G BIT(2) -#define EEE_CFG_ADV_SPEED_10G BIT(3) -#define EEE_TX_TIMER_USEC_MASK 0xfffffff0 -#define EEE_TX_TIMER_USEC_OFFSET 4 -#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00 -#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 -#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 - - u32 deprecated; - - u32 fec_mode; -#define FEC_FORCE_MODE_MASK 0x000000ff -#define FEC_FORCE_MODE_OFFSET 0 -#define FEC_FORCE_MODE_NONE 0x00 -#define FEC_FORCE_MODE_FIRECODE 0x01 -#define FEC_FORCE_MODE_RS 0x02 -#define FEC_FORCE_MODE_AUTO 0x07 -#define FEC_EXTENDED_MODE_MASK 0xffffff00 -#define FEC_EXTENDED_MODE_OFFSET 8 -#define ETH_EXT_FEC_NONE 0x00000100 -#define ETH_EXT_FEC_10G_NONE 0x00000200 -#define ETH_EXT_FEC_10G_BASE_R 0x00000400 -#define ETH_EXT_FEC_20G_NONE 0x00000800 -#define ETH_EXT_FEC_20G_BASE_R 0x00001000 -#define ETH_EXT_FEC_25G_NONE 0x00002000 -#define ETH_EXT_FEC_25G_BASE_R 0x00004000 -#define ETH_EXT_FEC_25G_RS528 0x00008000 -#define ETH_EXT_FEC_40G_NONE 0x00010000 -#define ETH_EXT_FEC_40G_BASE_R 0x00020000 -#define ETH_EXT_FEC_50G_NONE 0x00040000 -#define ETH_EXT_FEC_50G_BASE_R 0x00080000 -#define ETH_EXT_FEC_50G_RS528 0x00100000 -#define ETH_EXT_FEC_50G_RS544 0x00200000 -#define ETH_EXT_FEC_100G_NONE 0x00400000 -#define ETH_EXT_FEC_100G_BASE_R 0x00800000 -#define ETH_EXT_FEC_100G_RS528 0x01000000 -#define ETH_EXT_FEC_100G_RS544 0x02000000 - - u32 extended_speed; -#define ETH_EXT_SPEED_MASK 0x0000ffff -#define ETH_EXT_SPEED_OFFSET 0 -#define ETH_EXT_SPEED_AN 0x00000001 -#define ETH_EXT_SPEED_1G 0x00000002 -#define ETH_EXT_SPEED_10G 0x00000004 -#define ETH_EXT_SPEED_20G 0x00000008 -#define ETH_EXT_SPEED_25G 0x00000010 -#define ETH_EXT_SPEED_40G 0x00000020 -#define ETH_EXT_SPEED_50G_BASE_R 0x00000040 -#define ETH_EXT_SPEED_50G_BASE_R2 0x00000080 -#define ETH_EXT_SPEED_100G_BASE_R2 0x00000100 -#define ETH_EXT_SPEED_100G_BASE_R4 0x00000200 -#define ETH_EXT_SPEED_100G_BASE_P4 0x00000400 -#define ETH_EXT_ADV_SPEED_MASK 0xffff0000 -#define ETH_EXT_ADV_SPEED_OFFSET 16 -#define ETH_EXT_ADV_SPEED_RESERVED 0x00010000 
-#define ETH_EXT_ADV_SPEED_1G 0x00020000 -#define ETH_EXT_ADV_SPEED_10G 0x00040000 -#define ETH_EXT_ADV_SPEED_20G 0x00080000 -#define ETH_EXT_ADV_SPEED_25G 0x00100000 -#define ETH_EXT_ADV_SPEED_40G 0x00200000 -#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00400000 -#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00800000 -#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x01000000 -#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x02000000 -#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x04000000 -}; - -struct port_mf_cfg { - u32 dynamic_cfg; -#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff -#define PORT_MF_CFG_OV_TAG_SHIFT 0 -#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK - - u32 reserved[1]; -}; - -struct eth_stats { - u64 r64; - u64 r127; - u64 r255; - u64 r511; - u64 r1023; - u64 r1518; - - union { - struct { - u64 r1522; - u64 r2047; - u64 r4095; - u64 r9216; - u64 r16383; - } bb0; - struct { - u64 unused1; - u64 r1519_to_max; - u64 unused2; - u64 unused3; - u64 unused4; - } ah0; - } u0; - - u64 rfcs; - u64 rxcf; - u64 rxpf; - u64 rxpp; - u64 raln; - u64 rfcr; - u64 rovr; - u64 rjbr; - u64 rund; - u64 rfrg; - u64 t64; - u64 t127; - u64 t255; - u64 t511; - u64 t1023; - u64 t1518; - - union { - struct { - u64 t2047; - u64 t4095; - u64 t9216; - u64 t16383; - } bb1; - struct { - u64 t1519_to_max; - u64 unused6; - u64 unused7; - u64 unused8; - } ah1; - } u1; - - u64 txpf; - u64 txpp; - - union { - struct { - u64 tlpiec; - u64 tncl; - } bb2; - struct { - u64 unused9; - u64 unused10; - } ah2; - } u2; - - u64 rbyte; - u64 rxuca; - u64 rxmca; - u64 rxbca; - u64 rxpok; - u64 tbyte; - u64 txuca; - u64 txmca; - u64 txbca; - u64 txcf; -}; - -struct brb_stats { - u64 brb_truncate[8]; - u64 brb_discard[8]; -}; - -struct port_stats { - struct brb_stats brb; - struct eth_stats eth; -}; - -struct couple_mode_teaming { - u8 port_cmt[MCP_GLOB_PORT_MAX]; -#define PORT_CMT_IN_TEAM (1 << 0) - -#define PORT_CMT_PORT_ROLE (1 << 1) -#define PORT_CMT_PORT_INACTIVE (0 << 1) -#define PORT_CMT_PORT_ACTIVE (1 << 1) - -#define PORT_CMT_TEAM_MASK (1 << 2) -#define PORT_CMT_TEAM0 (0 << 2) -#define PORT_CMT_TEAM1 (1 << 2) -}; - -#define LLDP_CHASSIS_ID_STAT_LEN 4 -#define LLDP_PORT_ID_STAT_LEN 4 -#define DCBX_MAX_APP_PROTOCOL 32 -#define MAX_SYSTEM_LLDP_TLV_DATA 32 - -enum _lldp_agent { - LLDP_NEAREST_BRIDGE = 0, - LLDP_NEAREST_NON_TPMR_BRIDGE, - LLDP_NEAREST_CUSTOMER_BRIDGE, - LLDP_MAX_LLDP_AGENTS -}; - -struct lldp_config_params_s { - u32 config; -#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff -#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 -#define LLDP_CONFIG_HOLD_MASK 0x00000f00 -#define LLDP_CONFIG_HOLD_SHIFT 8 -#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 -#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 -#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 -#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 -#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 -#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 - u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; - u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; -}; - -struct lldp_status_params_s { - u32 prefix_seq_num; - u32 status; - u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; - u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; - u32 suffix_seq_num; -}; - -struct dcbx_ets_feature { - u32 flags; -#define DCBX_ETS_ENABLED_MASK 0x00000001 -#define DCBX_ETS_ENABLED_SHIFT 0 -#define DCBX_ETS_WILLING_MASK 0x00000002 -#define DCBX_ETS_WILLING_SHIFT 1 -#define DCBX_ETS_ERROR_MASK 0x00000004 -#define DCBX_ETS_ERROR_SHIFT 2 -#define DCBX_ETS_CBS_MASK 0x00000008 -#define DCBX_ETS_CBS_SHIFT 3 -#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 -#define DCBX_ETS_MAX_TCS_SHIFT 4 
-#define DCBX_OOO_TC_MASK 0x00000f00 -#define DCBX_OOO_TC_SHIFT 8 - u32 pri_tc_tbl[1]; -#define DCBX_TCP_OOO_TC (4) - -#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1) -#define DCBX_CEE_STRICT_PRIORITY 0xf - u32 tc_bw_tbl[2]; - u32 tc_tsa_tbl[2]; -#define DCBX_ETS_TSA_STRICT 0 -#define DCBX_ETS_TSA_CBS 1 -#define DCBX_ETS_TSA_ETS 2 -}; - -#define DCBX_TCP_OOO_TC (4) -#define DCBX_TCP_OOO_K2_4PORT_TC (3) - -struct dcbx_app_priority_entry { - u32 entry; -#define DCBX_APP_PRI_MAP_MASK 0x000000ff -#define DCBX_APP_PRI_MAP_SHIFT 0 -#define DCBX_APP_PRI_0 0x01 -#define DCBX_APP_PRI_1 0x02 -#define DCBX_APP_PRI_2 0x04 -#define DCBX_APP_PRI_3 0x08 -#define DCBX_APP_PRI_4 0x10 -#define DCBX_APP_PRI_5 0x20 -#define DCBX_APP_PRI_6 0x40 -#define DCBX_APP_PRI_7 0x80 -#define DCBX_APP_SF_MASK 0x00000300 -#define DCBX_APP_SF_SHIFT 8 -#define DCBX_APP_SF_ETHTYPE 0 -#define DCBX_APP_SF_PORT 1 -#define DCBX_APP_SF_IEEE_MASK 0x0000f000 -#define DCBX_APP_SF_IEEE_SHIFT 12 -#define DCBX_APP_SF_IEEE_RESERVED 0 -#define DCBX_APP_SF_IEEE_ETHTYPE 1 -#define DCBX_APP_SF_IEEE_TCP_PORT 2 -#define DCBX_APP_SF_IEEE_UDP_PORT 3 -#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 - -#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 -#define DCBX_APP_PROTOCOL_ID_SHIFT 16 -}; - -struct dcbx_app_priority_feature { - u32 flags; -#define DCBX_APP_ENABLED_MASK 0x00000001 -#define DCBX_APP_ENABLED_SHIFT 0 -#define DCBX_APP_WILLING_MASK 0x00000002 -#define DCBX_APP_WILLING_SHIFT 1 -#define DCBX_APP_ERROR_MASK 0x00000004 -#define DCBX_APP_ERROR_SHIFT 2 -#define DCBX_APP_MAX_TCS_MASK 0x0000f000 -#define DCBX_APP_MAX_TCS_SHIFT 12 -#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 -#define DCBX_APP_NUM_ENTRIES_SHIFT 16 - struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; -}; - -struct dcbx_features { - struct dcbx_ets_feature ets; - u32 pfc; -#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff -#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 - -#define DCBX_PFC_FLAGS_MASK 0x0000ff00 -#define DCBX_PFC_FLAGS_SHIFT 8 -#define DCBX_PFC_CAPS_MASK 0x00000f00 -#define DCBX_PFC_CAPS_SHIFT 8 -#define DCBX_PFC_MBC_MASK 0x00004000 -#define DCBX_PFC_MBC_SHIFT 14 -#define DCBX_PFC_WILLING_MASK 0x00008000 -#define DCBX_PFC_WILLING_SHIFT 15 -#define DCBX_PFC_ENABLED_MASK 0x00010000 -#define DCBX_PFC_ENABLED_SHIFT 16 -#define DCBX_PFC_ERROR_MASK 0x00020000 -#define DCBX_PFC_ERROR_SHIFT 17 - - struct dcbx_app_priority_feature app; -}; - -struct dcbx_local_params { - u32 config; -#define DCBX_CONFIG_VERSION_MASK 0x00000007 -#define DCBX_CONFIG_VERSION_SHIFT 0 -#define DCBX_CONFIG_VERSION_DISABLED 0 -#define DCBX_CONFIG_VERSION_IEEE 1 -#define DCBX_CONFIG_VERSION_CEE 2 -#define DCBX_CONFIG_VERSION_STATIC 4 - - u32 flags; - struct dcbx_features features; -}; - -struct dcbx_mib { - u32 prefix_seq_num; - u32 flags; - struct dcbx_features features; - u32 suffix_seq_num; -}; - -struct lldp_system_tlvs_buffer_s { - u16 valid; - u16 length; - u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; -}; - -struct dcb_dscp_map { - u32 flags; -#define DCB_DSCP_ENABLE_MASK 0x1 -#define DCB_DSCP_ENABLE_SHIFT 0 -#define DCB_DSCP_ENABLE 1 - u32 dscp_pri_map[8]; -}; - -struct public_global { - u32 max_path; - u32 max_ports; -#define MODE_1P 1 -#define 
MODE_2P 2 -#define MODE_3P 3 -#define MODE_4P 4 - u32 debug_mb_offset; - u32 phymod_dbg_mb_offset; - struct couple_mode_teaming cmt; - s32 internal_temperature; - u32 mfw_ver; - u32 running_bundle_id; - s32 external_temperature; - u32 mdump_reason; - u64 reserved; - u32 data_ptr; - u32 data_size; -}; - -struct fw_flr_mb { - u32 aggint; - u32 opgen_addr; - u32 accum_ack; -}; - -struct public_path { - struct fw_flr_mb flr_mb; - u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; - - u32 process_kill; -#define PROCESS_KILL_COUNTER_MASK 0x0000ffff -#define PROCESS_KILL_COUNTER_SHIFT 0 -#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 -#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 -#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit) -}; - -struct public_port { - u32 validity_map; - - u32 link_status; -#define LINK_STATUS_LINK_UP 0x00000001 -#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e -#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) -#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 -#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 -#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 -#define LINK_STATUS_PFC_ENABLED 0x00000100 -#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 -#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 -#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 -#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 -#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 -#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 -#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 -#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 -#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000 -#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) -#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18) -#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) -#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) -#define LINK_STATUS_SFP_TX_FAULT 0x00100000 -#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 -#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 -#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 -#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 -#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 -#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 - -#define LINK_STATUS_FEC_MODE_MASK 0x38000000 -#define LINK_STATUS_FEC_MODE_NONE (0 << 27) -#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27) -#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27) - - u32 link_status1; - u32 ext_phy_fw_version; - u32 drv_phy_cfg_addr; - - u32 port_stx; - - u32 stat_nig_timer; - - struct port_mf_cfg port_mf_config; - struct port_stats stats; - - u32 media_type; -#define MEDIA_UNSPECIFIED 0x0 -#define MEDIA_SFPP_10G_FIBER 0x1 -#define MEDIA_XFP_FIBER 0x2 -#define MEDIA_DA_TWINAX 0x3 -#define MEDIA_BASE_T 0x4 -#define MEDIA_SFP_1G_FIBER 0x5 -#define MEDIA_MODULE_FIBER 0x6 -#define MEDIA_KR 0xf0 -#define MEDIA_NOT_PRESENT 0xff - - u32 lfa_status; - u32 link_change_count; - - struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; - struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; - struct lldp_system_tlvs_buffer_s 
system_lldp_tlvs_buf; - - /* DCBX related MIB */ - struct dcbx_local_params local_admin_dcbx_mib; - struct dcbx_mib remote_dcbx_mib; - struct dcbx_mib operational_dcbx_mib; - - u32 reserved[2]; - - u32 transceiver_data; -#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff -#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 -#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 -#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 -#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 -#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 -#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 -#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00 -#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8 -#define ETH_TRANSCEIVER_TYPE_NONE 0x00 -#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff -#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 -#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 -#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 -#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 -#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 -#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 -#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 -#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 -#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 -#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a -#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b -#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c -#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d -#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e -#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f -#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 -#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 -#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 -#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 -#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 -#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 -#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 -#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 -#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 -#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 -#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a -#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b -#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c -#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d -#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e -#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f -#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 -#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 -#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a - - u32 wol_info; - u32 wol_pkt_len; - u32 wol_pkt_details; - struct dcb_dscp_map dcb_dscp_map; - - u32 eee_status; -#define EEE_ACTIVE_BIT BIT(0) -#define EEE_LD_ADV_STATUS_MASK 0x000000f0 -#define EEE_LD_ADV_STATUS_OFFSET 4 -#define EEE_1G_ADV BIT(1) -#define EEE_10G_ADV BIT(2) -#define EEE_LP_ADV_STATUS_MASK 0x00000f00 -#define EEE_LP_ADV_STATUS_OFFSET 8 -#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 -#define EEE_SUPPORTED_SPEED_OFFSET 12 -#define EEE_1G_SUPPORTED BIT(1) -#define EEE_10G_SUPPORTED BIT(2) - - u32 eee_remote; -#define EEE_REMOTE_TW_TX_MASK 0x0000ffff -#define EEE_REMOTE_TW_TX_OFFSET 0 -#define EEE_REMOTE_TW_RX_MASK 0xffff0000 -#define EEE_REMOTE_TW_RX_OFFSET 16 - - u32 reserved1; - u32 
oem_cfg_port; -#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 -#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 -#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 -#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 -#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C -#define OEM_CFG_SCHED_TYPE_OFFSET 2 -#define OEM_CFG_SCHED_TYPE_ETS 0x1 -#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 -}; - -struct public_func { - u32 reserved0[2]; - - u32 mtu_size; - - u32 reserved[7]; - - u32 config; -#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 -#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 -#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 - -#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 -#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 -#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 -#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 -#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 -#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 -#define FUNC_MF_CFG_PROTOCOL_NVMETCP 0x00000040 -#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000040 - -#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 -#define FUNC_MF_CFG_MIN_BW_SHIFT 8 -#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 -#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 -#define FUNC_MF_CFG_MAX_BW_SHIFT 16 -#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 - - u32 status; -#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 - - u32 mac_upper; -#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff -#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 -#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK - u32 mac_lower; -#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff - - u32 fcoe_wwn_port_name_upper; - u32 fcoe_wwn_port_name_lower; - - u32 fcoe_wwn_node_name_upper; - u32 fcoe_wwn_node_name_lower; - - u32 ovlan_stag; -#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff -#define FUNC_MF_CFG_OV_STAG_SHIFT 0 -#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK - - u32 pf_allocation; - - u32 preserve_data; - - u32 driver_last_activity_ts; - - u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; - - u32 drv_id; -#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff -#define DRV_ID_PDA_COMP_VER_SHIFT 0 - -#define LOAD_REQ_HSI_VERSION 2 -#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 -#define DRV_ID_MCP_HSI_VER_SHIFT 16 -#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \ - DRV_ID_MCP_HSI_VER_SHIFT) - -#define DRV_ID_DRV_TYPE_MASK 0x7f000000 -#define DRV_ID_DRV_TYPE_SHIFT 24 -#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT) - -#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 -#define DRV_ID_DRV_INIT_HW_SHIFT 31 -#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) - - u32 oem_cfg_func; -#define OEM_CFG_FUNC_TC_MASK 0x0000000F -#define OEM_CFG_FUNC_TC_OFFSET 0 -#define OEM_CFG_FUNC_TC_0 0x0 -#define OEM_CFG_FUNC_TC_1 0x1 -#define OEM_CFG_FUNC_TC_2 0x2 -#define OEM_CFG_FUNC_TC_3 0x3 -#define OEM_CFG_FUNC_TC_4 0x4 -#define OEM_CFG_FUNC_TC_5 0x5 -#define OEM_CFG_FUNC_TC_6 0x6 -#define OEM_CFG_FUNC_TC_7 0x7 - -#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 -#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 -#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 -#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 -}; - -struct mcp_mac { - u32 mac_upper; - u32 mac_lower; -}; - -struct mcp_val64 { - u32 lo; - u32 hi; -}; - -struct mcp_file_att { - u32 nvm_start_addr; - u32 len; -}; - -struct bist_nvm_image_att { - u32 return_code; - u32 image_type; - u32 nvm_start_addr; - u32 len; -}; - -#define MCP_DRV_VER_STR_SIZE 16 -#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) -#define MCP_DRV_NVM_BUF_LEN 32 -struct 
drv_version_stc { - u32 version; - u8 name[MCP_DRV_VER_STR_SIZE - 4]; -}; - -struct lan_stats_stc { - u64 ucast_rx_pkts; - u64 ucast_tx_pkts; - u32 fcs_err; - u32 rserved; -}; - -struct fcoe_stats_stc { - u64 rx_pkts; - u64 tx_pkts; - u32 fcs_err; - u32 login_failure; -}; - -struct ocbb_data_stc { - u32 ocbb_host_addr; - u32 ocsd_host_addr; - u32 ocsd_req_update_interval; -}; - -#define MAX_NUM_OF_SENSORS 7 -struct temperature_status_stc { - u32 num_of_sensors; - u32 sensor[MAX_NUM_OF_SENSORS]; -}; - -/* crash dump configuration header */ -struct mdump_config_stc { - u32 version; - u32 config; - u32 epoc; - u32 num_of_logs; - u32 valid_logs; -}; - -enum resource_id_enum { - RESOURCE_NUM_SB_E = 0, - RESOURCE_NUM_L2_QUEUE_E = 1, - RESOURCE_NUM_VPORT_E = 2, - RESOURCE_NUM_VMQ_E = 3, - RESOURCE_FACTOR_NUM_RSS_PF_E = 4, - RESOURCE_FACTOR_RSS_PER_VF_E = 5, - RESOURCE_NUM_RL_E = 6, - RESOURCE_NUM_PQ_E = 7, - RESOURCE_NUM_VF_E = 8, - RESOURCE_VFC_FILTER_E = 9, - RESOURCE_ILT_E = 10, - RESOURCE_CQS_E = 11, - RESOURCE_GFT_PROFILES_E = 12, - RESOURCE_NUM_TC_E = 13, - RESOURCE_NUM_RSS_ENGINES_E = 14, - RESOURCE_LL2_QUEUE_E = 15, - RESOURCE_RDMA_STATS_QUEUE_E = 16, - RESOURCE_BDQ_E = 17, - RESOURCE_QCN_E = 18, - RESOURCE_LLH_FILTER_E = 19, - RESOURCE_VF_MAC_ADDR = 20, - RESOURCE_LL2_CQS_E = 21, - RESOURCE_VF_CNQS = 22, - RESOURCE_MAX_NUM, - RESOURCE_NUM_INVALID = 0xFFFFFFFF -}; - -/* Resource ID is to be filled by the driver in the MB request - * Size, offset & flags to be filled by the MFW in the MB response - */ -struct resource_info { - enum resource_id_enum res_id; - u32 size; /* number of allocated resources */ - u32 offset; /* Offset of the 1st resource */ - u32 vf_size; - u32 vf_offset; - u32 flags; -#define RESOURCE_ELEMENT_STRICT (1 << 0) -}; - -#define DRV_ROLE_NONE 0 -#define DRV_ROLE_PREBOOT 1 -#define DRV_ROLE_OS 2 -#define DRV_ROLE_KDUMP 3 - -struct load_req_stc { - u32 drv_ver_0; - u32 drv_ver_1; - u32 fw_ver; - u32 misc0; -#define LOAD_REQ_ROLE_MASK 0x000000FF -#define LOAD_REQ_ROLE_SHIFT 0 -#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00 -#define LOAD_REQ_LOCK_TO_SHIFT 8 -#define LOAD_REQ_LOCK_TO_DEFAULT 0 -#define LOAD_REQ_LOCK_TO_NONE 255 -#define LOAD_REQ_FORCE_MASK 0x000F0000 -#define LOAD_REQ_FORCE_SHIFT 16 -#define LOAD_REQ_FORCE_NONE 0 -#define LOAD_REQ_FORCE_PF 1 -#define LOAD_REQ_FORCE_ALL 2 -#define LOAD_REQ_FLAGS0_MASK 0x00F00000 -#define LOAD_REQ_FLAGS0_SHIFT 20 -#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0) -}; - -struct load_rsp_stc { - u32 drv_ver_0; - u32 drv_ver_1; - u32 fw_ver; - u32 misc0; -#define LOAD_RSP_ROLE_MASK 0x000000FF -#define LOAD_RSP_ROLE_SHIFT 0 -#define LOAD_RSP_HSI_MASK 0x0000FF00 -#define LOAD_RSP_HSI_SHIFT 8 -#define LOAD_RSP_FLAGS0_MASK 0x000F0000 -#define LOAD_RSP_FLAGS0_SHIFT 16 -#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0) -}; - -struct mdump_retain_data_stc { - u32 valid; - u32 epoch; - u32 pf; - u32 status; -}; - -union drv_union_data { - u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; - struct mcp_mac wol_mac; - - struct eth_phy_cfg drv_phy_cfg; - - struct mcp_val64 val64; - - u8 raw_data[MCP_DRV_NVM_BUF_LEN]; - - struct mcp_file_att file_att; - - u32 ack_vf_disabled[VF_MAX_STATIC / 32]; - - struct drv_version_stc drv_version; - - struct lan_stats_stc lan_stats; - struct fcoe_stats_stc fcoe_stats; - struct ocbb_data_stc ocbb_info; - struct temperature_status_stc temp_info; - struct resource_info resource; - struct bist_nvm_image_att nvm_image_att; - struct mdump_config_stc mdump_config; -}; - -struct public_drv_mb { - u32 drv_mb_header; -#define 
DRV_MSG_CODE_MASK 0xffff0000 -#define DRV_MSG_CODE_LOAD_REQ 0x10000000 -#define DRV_MSG_CODE_LOAD_DONE 0x11000000 -#define DRV_MSG_CODE_INIT_HW 0x12000000 -#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000 -#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 -#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 -#define DRV_MSG_CODE_INIT_PHY 0x22000000 -#define DRV_MSG_CODE_LINK_RESET 0x23000000 -#define DRV_MSG_CODE_SET_DCBX 0x25000000 -#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000 -#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000 -#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000 -#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000 -#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 -#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000 -#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000 -#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 -#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 -#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 -#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000 - -#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 -#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 -#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000 -#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000 -#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000 -#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000 -#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 -#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 -#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000 -#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000 -#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000 -#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 -#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000 -#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000 -#define DRV_MSG_CODE_MCP_RESET 0x00090000 -#define DRV_MSG_CODE_SET_VERSION 0x000f0000 -#define DRV_MSG_CODE_MCP_HALT 0x00100000 -#define DRV_MSG_CODE_SET_VMAC 0x00110000 -#define DRV_MSG_CODE_GET_VMAC 0x00120000 -#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 -#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30 -#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 -#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 -#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 - -#define DRV_MSG_CODE_GET_STATS 0x00130000 -#define DRV_MSG_CODE_STATS_TYPE_LAN 1 -#define DRV_MSG_CODE_STATS_TYPE_FCOE 2 -#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 -#define DRV_MSG_CODE_STATS_TYPE_RDMA 4 - -#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000 - -#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000 - -#define DRV_MSG_CODE_BIST_TEST 0x001e0000 -#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 -#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000 -/* Send crash dump commands with param[3:0] - opcode */ -#define DRV_MSG_CODE_MDUMP_CMD 0x00250000 -#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000 -#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000 -#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000 - -#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000 - -#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F -#define RESOURCE_CMD_REQ_RESC_SHIFT 0 -#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0 -#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5 -#define RESOURCE_OPCODE_REQ 1 -#define RESOURCE_OPCODE_REQ_WO_AGING 2 -#define RESOURCE_OPCODE_REQ_W_AGING 3 -#define RESOURCE_OPCODE_RELEASE 4 -#define RESOURCE_OPCODE_FORCE_RELEASE 5 -#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00 -#define RESOURCE_CMD_REQ_AGE_SHIFT 8 - -#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF -#define RESOURCE_CMD_RSP_OWNER_SHIFT 0 -#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700 -#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8 -#define RESOURCE_OPCODE_GNT 1 -#define 
RESOURCE_OPCODE_BUSY 2 -#define RESOURCE_OPCODE_RELEASED 3 -#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4 -#define RESOURCE_OPCODE_WRONG_OWNER 5 -#define RESOURCE_OPCODE_UNKNOWN_CMD 255 - -#define RESOURCE_DUMP 0 - -/* DRV_MSG_CODE_MDUMP_CMD parameters */ -#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f -#define DRV_MSG_CODE_MDUMP_ACK 0x01 -#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02 -#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03 -#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04 -#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05 -#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 -#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 -#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 - -#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a -#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b -#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c - -#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000 -#define DRV_MSG_CODE_OS_WOL 0x002e0000 - -#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000 -#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000 -#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff - - u32 drv_mb_param; -#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 -#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 -#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 -#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 -#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF -#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 - -#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 -#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0 -#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF -#define DRV_MB_PARAM_NVM_LEN_OFFSET 24 -#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 - -#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 -#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF -#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 -#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 -#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 -#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 - -#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0 -#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F -#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0 -#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 -#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 -#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 - -#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0 -#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF -#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 -#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 -#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 -#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF - -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 - -#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0 -#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF - -#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \ - DRV_MB_PARAM_WOL_DISABLED | \ - DRV_MB_PARAM_WOL_ENABLED) -#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP -#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED -#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED - -#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \ - DRV_MB_PARAM_ESWITCH_MODE_VEB | \ - DRV_MB_PARAM_ESWITCH_MODE_VEPA) -#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0 -#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 -#define 
DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 - -#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 -#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 - -#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 -#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 -#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 - -#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 -#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 -#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 -#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc -#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 -#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00 -#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 -#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000 - - /* Resource Allocation params - Driver version support */ -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 - -#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 -#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 -#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 -#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 - -#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 -#define DRV_MB_PARAM_BIST_RC_PASSED 1 -#define DRV_MB_PARAM_BIST_RC_FAILED 2 -#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 - -#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 -#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff -#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 -#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00 - -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008 -#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 - -/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ -#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0 -#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff - -/* Driver attributes params */ -#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0 -#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff -#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24 -#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000 - -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff -#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 -#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 -#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 -#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 - - u32 fw_mb_header; -#define FW_MSG_CODE_MASK 0xffff0000 -#define FW_MSG_CODE_UNSUPPORTED 0x00000000 -#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 -#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 -#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000 -#define 
FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000 -#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 -#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 -#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 -#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 -#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 -#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000 -#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000 -#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000 -#define FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE 0x3b000000 -#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 - -#define FW_MSG_CODE_NVM_OK 0x00010000 -#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000 -#define FW_MSG_CODE_PHY_OK 0x00110000 -#define FW_MSG_CODE_OK 0x00160000 -#define FW_MSG_CODE_ERROR 0x00170000 -#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000 -#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000 -#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000 - -#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000 -#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000 -#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000 -#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff - -#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000 -#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000 -#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000 -#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000 -#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000 - -#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000 - - u32 fw_mb_param; -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 - - /* Get PF RDMA protocol command response */ -#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 -#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 -#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 -#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 - - /* Get MFW feature support response */ -#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0) -#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1) -#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5) -#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6) -#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16) - -#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) - -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 - -#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff -#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 - - u32 drv_pulse_mb; -#define DRV_PULSE_SEQ_MASK 0x00007fff -#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 -#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 - - u32 mcp_pulse_mb; -#define MCP_PULSE_SEQ_MASK 0x00007fff -#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 -#define MCP_EVENT_MASK 0xffff0000 -#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 - - union drv_union_data union_data; -}; - -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 -#define 
FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 - -enum MFW_DRV_MSG_TYPE { - MFW_DRV_MSG_LINK_CHANGE, - MFW_DRV_MSG_FLR_FW_ACK_FAILED, - MFW_DRV_MSG_VF_DISABLED, - MFW_DRV_MSG_LLDP_DATA_UPDATED, - MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, - MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, - MFW_DRV_MSG_ERROR_RECOVERY, - MFW_DRV_MSG_BW_UPDATE, - MFW_DRV_MSG_S_TAG_UPDATE, - MFW_DRV_MSG_GET_LAN_STATS, - MFW_DRV_MSG_GET_FCOE_STATS, - MFW_DRV_MSG_GET_ISCSI_STATS, - MFW_DRV_MSG_GET_RDMA_STATS, - MFW_DRV_MSG_FAILURE_DETECTED, - MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, - MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED, - MFW_DRV_MSG_RESERVED, - MFW_DRV_MSG_GET_TLV_REQ, - MFW_DRV_MSG_OEM_CFG_UPDATE, - MFW_DRV_MSG_MAX -}; - -#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1) -#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2) -#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3) -#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) - -struct public_mfw_mb { - u32 sup_msgs; - u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; - u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; -}; - -enum public_sections { - PUBLIC_DRV_MB, - PUBLIC_MFW_MB, - PUBLIC_GLOBAL, - PUBLIC_PATH, - PUBLIC_PORT, - PUBLIC_FUNC, - PUBLIC_MAX_SECTIONS -}; - -struct mcp_public_data { - u32 num_sections; - u32 sections[PUBLIC_MAX_SECTIONS]; - struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; - struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; - struct public_global global; - struct public_path path[MCP_GLOB_PATH_MAX]; - struct public_port port[MCP_GLOB_PORT_MAX]; - struct public_func func[MCP_GLOB_FUNC_MAX]; -}; - -#define MAX_I2C_TRANSACTION_SIZE 16 - -/* OCBB definitions */ -enum tlvs { - /* Category 1: Device Properties */ - DRV_TLV_CLP_STR, - DRV_TLV_CLP_STR_CTD, - /* Category 6: Device Configuration */ - DRV_TLV_SCSI_TO, - DRV_TLV_R_T_TOV, - DRV_TLV_R_A_TOV, - DRV_TLV_E_D_TOV, - DRV_TLV_CR_TOV, - DRV_TLV_BOOT_TYPE, - /* Category 8: Port Configuration */ - DRV_TLV_NPIV_ENABLED, - /* Category 10: Function Configuration */ - DRV_TLV_FEATURE_FLAGS, - DRV_TLV_LOCAL_ADMIN_ADDR, - DRV_TLV_ADDITIONAL_MAC_ADDR_1, - DRV_TLV_ADDITIONAL_MAC_ADDR_2, - DRV_TLV_LSO_MAX_OFFLOAD_SIZE, - DRV_TLV_LSO_MIN_SEGMENT_COUNT, - DRV_TLV_PROMISCUOUS_MODE, - DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG, - DRV_TLV_FLEX_NIC_OUTER_VLAN_ID, - DRV_TLV_OS_DRIVER_STATES, - DRV_TLV_PXE_BOOT_PROGRESS, - /* Category 12: FC/FCoE Configuration */ - DRV_TLV_NPIV_STATE, - DRV_TLV_NUM_OF_NPIV_IDS, - DRV_TLV_SWITCH_NAME, - DRV_TLV_SWITCH_PORT_NUM, - DRV_TLV_SWITCH_PORT_ID, - DRV_TLV_VENDOR_NAME, - DRV_TLV_SWITCH_MODEL, - DRV_TLV_SWITCH_FW_VER, - DRV_TLV_QOS_PRIORITY_PER_802_1P, - DRV_TLV_PORT_ALIAS, - DRV_TLV_PORT_STATE, - DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_LINK_FAILURE_COUNT, - DRV_TLV_FCOE_BOOT_PROGRESS, - /* Category 13: iSCSI Configuration */ - DRV_TLV_TARGET_LLMNR_ENABLED, - DRV_TLV_HEADER_DIGEST_FLAG_ENABLED, - DRV_TLV_DATA_DIGEST_FLAG_ENABLED, - DRV_TLV_AUTHENTICATION_METHOD, - DRV_TLV_ISCSI_BOOT_TARGET_PORTAL, - DRV_TLV_MAX_FRAME_SIZE, - DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_ISCSI_BOOT_PROGRESS, - /* Category 20: Device Data */ - DRV_TLV_PCIE_BUS_RX_UTILIZATION, - DRV_TLV_PCIE_BUS_TX_UTILIZATION, - DRV_TLV_DEVICE_CPU_CORES_UTILIZATION, - DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED, - DRV_TLV_NCSI_RX_BYTES_RECEIVED, - DRV_TLV_NCSI_TX_BYTES_SENT, - /* 
Category 22: Base Port Data */ - DRV_TLV_RX_DISCARDS, - DRV_TLV_RX_ERRORS, - DRV_TLV_TX_ERRORS, - DRV_TLV_TX_DISCARDS, - DRV_TLV_RX_FRAMES_RECEIVED, - DRV_TLV_TX_FRAMES_SENT, - /* Category 23: FC/FCoE Port Data */ - DRV_TLV_RX_BROADCAST_PACKETS, - DRV_TLV_TX_BROADCAST_PACKETS, - /* Category 28: Base Function Data */ - DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4, - DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6, - DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, - DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, - DRV_TLV_PF_RX_FRAMES_RECEIVED, - DRV_TLV_RX_BYTES_RECEIVED, - DRV_TLV_PF_TX_FRAMES_SENT, - DRV_TLV_TX_BYTES_SENT, - DRV_TLV_IOV_OFFLOAD, - DRV_TLV_PCI_ERRORS_CAP_ID, - DRV_TLV_UNCORRECTABLE_ERROR_STATUS, - DRV_TLV_UNCORRECTABLE_ERROR_MASK, - DRV_TLV_CORRECTABLE_ERROR_STATUS, - DRV_TLV_CORRECTABLE_ERROR_MASK, - DRV_TLV_PCI_ERRORS_AECC_REGISTER, - DRV_TLV_TX_QUEUES_EMPTY, - DRV_TLV_RX_QUEUES_EMPTY, - DRV_TLV_TX_QUEUES_FULL, - DRV_TLV_RX_QUEUES_FULL, - /* Category 29: FC/FCoE Function Data */ - DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, - DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, - DRV_TLV_FCOE_RX_FRAMES_RECEIVED, - DRV_TLV_FCOE_RX_BYTES_RECEIVED, - DRV_TLV_FCOE_TX_FRAMES_SENT, - DRV_TLV_FCOE_TX_BYTES_SENT, - DRV_TLV_CRC_ERROR_COUNT, - DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_1_TIMESTAMP, - DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_2_TIMESTAMP, - DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_3_TIMESTAMP, - DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_4_TIMESTAMP, - DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_5_TIMESTAMP, - DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT, - DRV_TLV_LOSS_OF_SIGNAL_ERRORS, - DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT, - DRV_TLV_DISPARITY_ERROR_COUNT, - DRV_TLV_CODE_VIOLATION_ERROR_COUNT, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4, - DRV_TLV_LAST_FLOGI_TIMESTAMP, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4, - DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP, - DRV_TLV_LAST_FLOGI_RJT, - DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP, - DRV_TLV_FDISCS_SENT_COUNT, - DRV_TLV_FDISC_ACCS_RECEIVED, - DRV_TLV_FDISC_RJTS_RECEIVED, - DRV_TLV_PLOGI_SENT_COUNT, - DRV_TLV_PLOGI_ACCS_RECEIVED, - DRV_TLV_PLOGI_RJTS_RECEIVED, - DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_1_TIMESTAMP, - DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_2_TIMESTAMP, - DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_3_TIMESTAMP, - DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_4_TIMESTAMP, - DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_5_TIMESTAMP, - DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_1_ACC_TIMESTAMP, - DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_2_ACC_TIMESTAMP, - DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_3_ACC_TIMESTAMP, - DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_4_ACC_TIMESTAMP, - DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_5_ACC_TIMESTAMP, - DRV_TLV_LOGOS_ISSUED, - DRV_TLV_LOGO_ACCS_RECEIVED, - DRV_TLV_LOGO_RJTS_RECEIVED, - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_1_TIMESTAMP, - DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_2_TIMESTAMP, - 
DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_3_TIMESTAMP, - DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_4_TIMESTAMP, - DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_5_TIMESTAMP, - DRV_TLV_LOGOS_RECEIVED, - DRV_TLV_ACCS_ISSUED, - DRV_TLV_PRLIS_ISSUED, - DRV_TLV_ACCS_RECEIVED, - DRV_TLV_ABTS_SENT_COUNT, - DRV_TLV_ABTS_ACCS_RECEIVED, - DRV_TLV_ABTS_RJTS_RECEIVED, - DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_1_TIMESTAMP, - DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_2_TIMESTAMP, - DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_3_TIMESTAMP, - DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_4_TIMESTAMP, - DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_5_TIMESTAMP, - DRV_TLV_RSCNS_RECEIVED, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4, - DRV_TLV_LUN_RESETS_ISSUED, - DRV_TLV_ABORT_TASK_SETS_ISSUED, - DRV_TLV_TPRLOS_SENT, - DRV_TLV_NOS_SENT_COUNT, - DRV_TLV_NOS_RECEIVED_COUNT, - DRV_TLV_OLS_COUNT, - DRV_TLV_LR_COUNT, - DRV_TLV_LRR_COUNT, - DRV_TLV_LIP_SENT_COUNT, - DRV_TLV_LIP_RECEIVED_COUNT, - DRV_TLV_EOFA_COUNT, - DRV_TLV_EOFNI_COUNT, - DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT, - DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT, - DRV_TLV_SCSI_STATUS_BUSY_COUNT, - DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT, - DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT, - DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT, - DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT, - DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT, - DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT, - DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_1_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_2_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_3_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_4_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_5_TIMESTAMP, - /* Category 30: iSCSI Function Data */ - DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, - DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, - DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED, - DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED, - DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT, - DRV_TLV_ISCSI_PDU_TX_BYTES_SENT -}; - -struct nvm_cfg_mac_address { - u32 mac_addr_hi; -#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff -#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 - - u32 mac_addr_lo; -}; - -struct nvm_cfg1_glob { - u32 generic_cont0; -#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0 -#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 -#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 -#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 -#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 -#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 -#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 -#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 - - u32 engineering_change[3]; - u32 manufacturing_id; - u32 serial_number[4]; - u32 pcie_cfg; - u32 mgmt_traffic; - - u32 core_cfg; -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 -#define 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15 - - u32 e_lane_cfg1; - u32 e_lane_cfg2; - u32 f_lane_cfg1; - u32 f_lane_cfg2; - u32 mps10_preemphasis; - u32 mps10_driver_current; - u32 mps25_preemphasis; - u32 mps25_driver_current; - u32 pci_id; - u32 pci_subsys_id; - u32 bar; - u32 mps10_txfir_main; - u32 mps10_txfir_post; - u32 mps25_txfir_main; - u32 mps25_txfir_post; - u32 manufacture_ver; - u32 manufacture_time; - u32 led_global_settings; - u32 generic_cont1; - - u32 mbi_version; -#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff -#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 -#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00 -#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 -#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000 -#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 - - u32 mbi_date; - u32 misc_sig; - - u32 device_capabilities; -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 - - u32 power_dissipated; - u32 power_consumed; - u32 efi_version; - u32 multi_net_modes_cap; - u32 reserved[41]; -}; - -struct nvm_cfg1_path { - u32 reserved[30]; -}; - -struct nvm_cfg1_port { - u32 rel_to_opt123; - u32 rel_to_opt124; - - u32 generic_cont0; -#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000 -#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 -#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 -#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 -#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 -#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 - - u32 pcie_cfg; - u32 features; - - u32 speed_cap_mask; -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 - - u32 link_settings; -#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f -#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8 -#define 
NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7 - - u32 phy_cfg; - u32 mgmt_traffic; - - u32 ext_phy; - /* EEE power saving mode */ -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 - - u32 mba_cfg1; - u32 mba_cfg2; - u32 vf_cfg; - struct nvm_cfg_mac_address lldp_mac_address; - u32 led_port_settings; - u32 transceiver_00; - u32 device_ids; - - u32 board_cfg; -#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff -#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 -#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 -#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 -#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 -#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 -#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 - - u32 mnm_10g_cap; - u32 mnm_10g_ctrl; - u32 mnm_10g_misc; - u32 mnm_25g_cap; - u32 mnm_25g_ctrl; - u32 mnm_25g_misc; - u32 mnm_40g_cap; - u32 mnm_40g_ctrl; - u32 mnm_40g_misc; - u32 mnm_50g_cap; - u32 mnm_50g_ctrl; - u32 mnm_50g_misc; - u32 mnm_100g_cap; - u32 mnm_100g_ctrl; - u32 mnm_100g_misc; - - u32 temperature; - u32 ext_phy_cfg1; - - u32 extended_speed; -#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff -#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400 - - u32 extended_fec_mode; - - u32 reserved[112]; -}; - -struct nvm_cfg1_func { - struct nvm_cfg_mac_address mac_address; - u32 rsrv1; - u32 rsrv2; - u32 device_id; - u32 
cmn_cfg; - u32 pci_cfg; - struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; - struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; - u32 preboot_generic_cfg; - u32 reserved[8]; -}; - -struct nvm_cfg1 { - struct nvm_cfg1_glob glob; - struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; - struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; - struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; -}; - -enum spad_sections { - SPAD_SECTION_TRACE, - SPAD_SECTION_NVM_CFG, - SPAD_SECTION_PUBLIC, - SPAD_SECTION_PRIVATE, - SPAD_SECTION_MAX -}; - -#define MCP_TRACE_SIZE 2048 /* 2kb */ - -/* This section is located at a fixed location in the beginning of the - * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade. - * All the rest of data has a floating location which differs from version to - * version, and is pointed by the mcp_meta_data below. - * Moreover, the spad_layout section is part of the MFW firmware, and is loaded - * with it from nvram in order to clear this portion. - */ -struct static_init { - u32 num_sections; - offsize_t sections[SPAD_SECTION_MAX]; -#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_])))) - - struct mcp_trace trace; -#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace))) - u8 trace_buffer[MCP_TRACE_SIZE]; -#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer))) - /* running_mfw has the same definition as in nvm_map.h. - * This bit indicate both the running dir, and the running bundle. - * It is set once when the LIM is loaded. - */ - u32 running_mfw; -#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw)))) - u32 build_time; -#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time)))) - u32 reset_type; -#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type)))) - u32 mfw_secure_mode; -#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode)))) - u16 pme_status_pf_bitmap; -#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap)))) - u16 pme_enable_pf_bitmap; -#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap)))) - u32 mim_nvm_addr; - u32 mim_start_addr; - u32 ah_pcie_link_params; -#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff) -#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0) -#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00) -#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8) -#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000) -#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16) -#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000) -#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24) -#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params)))) - - u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */ -}; - -#define NVM_MAGIC_VALUE 0x669955aa - -enum nvm_image_type { - NVM_TYPE_TIM1 = 0x01, - NVM_TYPE_TIM2 = 0x02, - NVM_TYPE_MIM1 = 0x03, - NVM_TYPE_MIM2 = 0x04, - NVM_TYPE_MBA = 0x05, - NVM_TYPE_MODULES_PN = 0x06, - NVM_TYPE_VPD = 0x07, - NVM_TYPE_MFW_TRACE1 = 0x08, - NVM_TYPE_MFW_TRACE2 = 0x09, - NVM_TYPE_NVM_CFG1 = 0x0a, - NVM_TYPE_L2B = 0x0b, - NVM_TYPE_DIR1 = 0x0c, - NVM_TYPE_EAGLE_FW1 = 0x0d, - NVM_TYPE_FALCON_FW1 = 0x0e, - NVM_TYPE_PCIE_FW1 = 0x0f, - NVM_TYPE_HW_SET = 0x10, - NVM_TYPE_LIM = 0x11, - NVM_TYPE_AVS_FW1 = 0x12, - NVM_TYPE_DIR2 = 0x13, - NVM_TYPE_CCM = 0x14, - NVM_TYPE_EAGLE_FW2 = 0x15, - NVM_TYPE_FALCON_FW2 = 0x16, - NVM_TYPE_PCIE_FW2 = 0x17, - NVM_TYPE_AVS_FW2 = 0x18, - NVM_TYPE_INIT_HW = 0x19, - NVM_TYPE_DEFAULT_CFG = 0x1a, - NVM_TYPE_MDUMP = 0x1b, - NVM_TYPE_META = 0x1c, - NVM_TYPE_ISCSI_CFG = 0x1d, - NVM_TYPE_FCOE_CFG 
= 0x1f, - NVM_TYPE_ETH_PHY_FW1 = 0x20, - NVM_TYPE_ETH_PHY_FW2 = 0x21, - NVM_TYPE_BDN = 0x22, - NVM_TYPE_8485X_PHY_FW = 0x23, - NVM_TYPE_PUB_KEY = 0x24, - NVM_TYPE_RECOVERY = 0x25, - NVM_TYPE_PLDM = 0x26, - NVM_TYPE_UPK1 = 0x27, - NVM_TYPE_UPK2 = 0x28, - NVM_TYPE_MASTER_KC = 0x29, - NVM_TYPE_BACKUP_KC = 0x2a, - NVM_TYPE_HW_DUMP = 0x2b, - NVM_TYPE_HW_DUMP_OUT = 0x2c, - NVM_TYPE_BIN_NVM_META = 0x30, - NVM_TYPE_ROM_TEST = 0xf0, - NVM_TYPE_88X33X0_PHY_FW = 0x31, - NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32, - NVM_TYPE_MAX, -}; - -#define DIR_ID_1 (0) - #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 2734f49956f7..e535983ce21b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -53,85 +53,94 @@ enum _dmae_cmd_crc_mask { #define DMAE_MAX_CLIENTS 32 /** - * @brief qed_gtt_init - Initialize GTT windows + * qed_gtt_init(): Initialize GTT windows. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_gtt_init(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured + * qed_ptt_invalidate(): Forces all ptt entries to be re-configured + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_ptt_invalidate(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool + * qed_ptt_pool_alloc(): Allocate and initialize PTT pool. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return struct _qed_status - success (0), negative - error. + * Return: struct _qed_status - success (0), negative - error. */ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_pool_free - + * qed_ptt_pool_free(): Free PTT pool. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address + * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt * - * @return u32 + * Return: u32. */ u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address + * qed_ptt_get_bar_addr(): Get PPT's external BAR address. * - * @param p_hwfn - * @param p_ptt + * @p_ptt: P_ptt * - * @return u32 + * Return: u32. */ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt); /** - * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address + * qed_ptt_set_win(): Set PTT Window's GRC BAR address * - * @param p_hwfn - * @param new_hw_addr - * @param p_ptt + * @p_hwfn: HW device data. + * @new_hw_addr: New HW address. + * @p_ptt: P_Ptt + * + * Return: Void. */ void qed_ptt_set_win(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 new_hw_addr); /** - * @brief qed_get_reserved_ptt - Get a specific reserved PTT + * qed_get_reserved_ptt(): Get a specific reserved PTT. * - * @param p_hwfn - * @param ptt_idx + * @p_hwfn: HW device data. + * @ptt_idx: Ptt Index. * - * @return struct qed_ptt * + * Return: struct qed_ptt *. */ struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, enum reserved_ptts ptt_idx); /** - * @brief qed_wr - Write value to BAR using the given ptt + * qed_wr(): Write value to BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @val: Val. + * @hw_addr: HW address * - * @param p_hwfn - * @param p_ptt - * @param val - * @param hw_addr + * Return: Void. 
*/ void qed_wr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -139,26 +148,28 @@ void qed_wr(struct qed_hwfn *p_hwfn, u32 val); /** - * @brief qed_rd - Read value from BAR using the given ptt + * qed_rd(): Read value from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address * - * @param p_hwfn - * @param p_ptt - * @param val - * @param hw_addr + * Return: Void. */ u32 qed_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr); /** - * @brief qed_memcpy_from - copy n bytes from BAR using the given - * ptt - * - * @param p_hwfn - * @param p_ptt - * @param dest - * @param hw_addr - * @param n + * qed_memcpy_from(): Copy n bytes from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @dest: Destination. + * @hw_addr: HW address. + * @n: N + * + * Return: Void. */ void qed_memcpy_from(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -167,14 +178,15 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn, size_t n); /** - * @brief qed_memcpy_to - copy n bytes to BAR using the given - * ptt - * - * @param p_hwfn - * @param p_ptt - * @param hw_addr - * @param src - * @param n + * qed_memcpy_to(): Copy n bytes to BAR using the given ptt + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address. + * @src: Source. + * @n: N + * + * Return: Void. */ void qed_memcpy_to(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -182,83 +194,97 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn, void *src, size_t n); /** - * @brief qed_fid_pretend - pretend to another function when - * accessing the ptt window. There is no way to unpretend - * a function. The only way to cancel a pretend is to - * pretend back to the original function. - * - * @param p_hwfn - * @param p_ptt - * @param fid - fid field of pxp_pretend structure. Can contain - * either pf / vf, port/path fields are don't care. + * qed_fid_pretend(): pretend to another function when + * accessing the ptt window. There is no way to unpretend + * a function. The only way to cancel a pretend is to + * pretend back to the original function. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @fid: fid field of pxp_pretend structure. Can contain + * either pf / vf, port/path fields are don't care. + * + * Return: Void. */ void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid); /** - * @brief qed_port_pretend - pretend to another port when - * accessing the ptt window + * qed_port_pretend(): Pretend to another port when accessing the ptt window * - * @param p_hwfn - * @param p_ptt - * @param port_id - the port to pretend to + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to + * + * Return: Void. */ void qed_port_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id); /** - * @brief qed_port_unpretend - cancel any previously set port - * pretend + * qed_port_unpretend(): Cancel any previously set port pretend + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_port_fid_pretend - pretend to another port and another function - * when accessing the ptt window + * qed_port_fid_pretend(): Pretend to another port and another function + * when accessing the ptt window + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to + * @fid: fid field of pxp_pretend structure. Can contain either pf / vf. 
* - * @param p_hwfn - * @param p_ptt - * @param port_id - the port to pretend to - * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf. + * Return: Void. */ void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id, u16 fid); /** - * @brief qed_vfid_to_concrete - build a concrete FID for a - * given VF ID + * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID * - * @param p_hwfn - * @param p_ptt - * @param vfid + * @p_hwfn: HW device data. + * @vfid: VFID. + * + * Return: Void. */ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid); /** - * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd - * this is declared here since other files will require it. - * @param idx + * qed_dmae_idx_to_go_cmd(): Map the idx to dmae cmd + * this is declared here since other files will require it. + * + * @idx: Index + * + * Return: Void. */ u32 qed_dmae_idx_to_go_cmd(u8 idx); /** - * @brief qed_dmae_info_alloc - Init the dmae_info structure - * which is part of p_hwfn. - * @param p_hwfn + * qed_dmae_info_alloc(): Init the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. + * + * Return: Int. */ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_dmae_info_free - Free the dmae_info structure - * which is part of p_hwfn + * qed_dmae_info_free(): Free the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_dmae_info_free(struct qed_hwfn *p_hwfn); @@ -292,14 +318,16 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn, #define QED_HW_ERR_MAX_STR_SIZE 256 /** - * @brief qed_hw_err_notify - Notify upper layer driver and management FW - * about a HW error. - * - * @param p_hwfn - * @param p_ptt - * @param err_type - * @param fmt - debug data buffer to send to the MFW - * @param ... - buffer format args + * qed_hw_err_notify(): Notify upper layer driver and management FW + * about a HW error. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @err_type: Err Type. + * @fmt: Debug data buffer to send to the MFW + * @...: buffer format args + * + * Return void. */ void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index ea888a2c6ddb..321c43408153 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. 
*/ #include <linux/types.h> @@ -13,17 +13,18 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" +#include "qed_iro_hsi.h" #include "qed_reg_addr.h" -#define CDU_VALIDATION_DEFAULT_CFG 61 +#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG -static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = { +static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = { {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */ {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */ {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */ }; -static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { +static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = { {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */ }; @@ -42,25 +43,49 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define QM_BYPASS_EN 1 #define QM_BYTE_CRD_EN 1 +/* Initial VOQ byte credit */ +#define QM_INITIAL_VOQ_BYTE_CRD 98304 /* Other PQ constants */ #define QM_OTHER_PQS_PER_PF 4 +/* VOQ constants */ +#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2) +#define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1) + /* WFQ constants */ -/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */ -#define QM_WFQ_UPPER_BOUND 62500000 +/* PF WFQ increment value, 0x9000 = 4*9*1024 */ +#define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000) + +/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */ +#define QM_PF_WFQ_UPPER_BOUND 62500000 + +/* PF WFQ max increment value, 0.7 * upper bound */ +#define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10) + +/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */ +#define QM_PF_WFQ_CRD_E5_NUM_VOQS 16 + +/* VP WFQ increment value */ +#define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL) -/* Bit of VOQ in WFQ VP PQ map */ -#define QM_WFQ_VP_PQ_VOQ_SHIFT 0 +/* VP WFQ min increment value */ +#define QM_VP_WFQ_MIN_INC_VAL 10800 -/* Bit of PF in WFQ VP PQ map */ -#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5 +/* VP WFQ max increment value, 2^30 */ +#define QM_VP_WFQ_MAX_INC_VAL 0x40000000 -/* 0x9000 = 4*9*1024 */ -#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) +/* VP WFQ bypass threshold */ +#define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100) -/* Max WFQ increment value is 0.7 * upper bound */ -#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10) +/* VP RL credit task cost */ +#define QM_VP_RL_CRD_TASK_COST 9700 + +/* Bit of VOQ in VP WFQ PQ map */ +#define QM_VP_WFQ_PQ_VOQ_SHIFT 0 + +/* Bit of PF in VP WFQ PQ map */ +#define QM_VP_WFQ_PQ_PF_SHIFT 5 /* RL constants */ @@ -71,12 +96,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) /* RL increment value - rate is specified in mbps */ -#define QM_RL_INC_VAL(rate) ({ \ - typeof(rate) __rate = (rate); \ - max_t(u32, \ - (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \ - (8 * 100)), \ - 1); }) +#define QM_RL_INC_VAL(rate) ({ \ + typeof(rate) __rate = (rate); \ + max_t(u32, \ + (u32)(((__rate ? 
__rate : \ + 100000) * \ + QM_RL_PERIOD * \ + 101) / (8 * 100)), 1); }) /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */ #define QM_PF_RL_UPPER_BOUND 62500000 @@ -84,16 +110,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { /* Max PF RL increment value is 0.7 * upper bound */ #define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10) -/* Vport RL Upper bound, link speed is in Mpbs */ -#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \ - QM_RL_INC_VAL(speed), \ - 9700 + 1000)) - -/* Max Vport RL increment value is the Vport RL upper bound */ -#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed) - -/* Vport RL credit threshold in case of QM bypass */ -#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1) +/* QCN RL Upper bound, speed is in Mpbs */ +#define QM_GLOBAL_RL_UPPER_BOUND(speed) ((u32)max_t( \ + u32, \ + (u32)(((speed) * \ + QM_RL_PERIOD * 101) / (8 * 100)), \ + QM_VP_RL_CRD_TASK_COST \ + + 1000)) /* AFullOprtnstcCrdMask constants */ #define QM_OPPOR_LINE_VOQ_DEF 1 @@ -156,20 +179,20 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { cmd ## _ ## field, \ value) -#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, \ +#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \ rl_id, ext_voq, wrr) \ do { \ u32 __reg = 0; \ \ BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \ - \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \ + memset(&(map), 0, sizeof(map)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \ !!(rl_valid)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, (vp_pq_id)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \ + SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \ (wrr)); \ \ STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ @@ -184,8 +207,8 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { (((rl) >> 8) << 9)) #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ - XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ - XSTORM_PQ_INFO_OFFSET(pq_id) + (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ + XSTORM_PQ_INFO_OFFSET(pq_id)) /******************** INTERNAL IMPLEMENTATION *********************/ @@ -204,7 +227,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 
1 : 0); if (pf_rl_en) { - u8 num_ext_voqs = MAX_NUM_VOQS_E4; + u8 num_ext_voqs = MAX_NUM_VOQS; u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1; /* Enable RLs for all VOQs */ @@ -236,7 +259,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en) if (pf_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, - QM_WFQ_UPPER_BOUND); + QM_PF_WFQ_UPPER_BOUND); } /* Prepare global RL enable/disable runtime init values */ @@ -257,7 +280,7 @@ static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en) if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, - QM_VP_RL_BYPASS_THRESH_SPEED); + QM_GLOBAL_RL_UPPER_BOUND(10000) - 1); } } @@ -271,7 +294,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) if (vport_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, - QM_WFQ_UPPER_BOUND); + QM_VP_WFQ_BYPASS_THRESH); } /* Prepare runtime init values to allocate PBF command queue lines for @@ -291,14 +314,14 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, } /* Prepare runtime init values to allocate PBF command queue lines. */ -static void qed_cmdq_lines_rt_init( - struct qed_hwfn *p_hwfn, - u8 max_ports_per_engine, - u8 max_phys_tcs_per_port, - struct init_qm_port_params port_params[MAX_NUM_PORTS]) +static void +qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u8 tc, ext_voq, port_id, num_tcs_in_port; - u8 num_ext_voqs = MAX_NUM_VOQS_E4; + u8 num_ext_voqs = MAX_NUM_VOQS; /* Clear PBF lines of all VOQs */ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++) @@ -364,11 +387,11 @@ static void qed_cmdq_lines_rt_init( * - No optimization for lossy TC (all are considered lossless). Shared space * is not enabled and allocated for each TC. */ -static void qed_btb_blocks_rt_init( - struct qed_hwfn *p_hwfn, - u8 max_ports_per_engine, - u8 max_phys_tcs_per_port, - struct init_qm_port_params port_params[MAX_NUM_PORTS]) +static void +qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u32 usable_blocks, pure_lb_blocks, phys_blocks; u8 tc, ext_voq, port_id, num_tcs_in_port; @@ -428,7 +451,7 @@ static void qed_btb_blocks_rt_init( */ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn) { - u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | + u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | (u32)QM_RL_CRD_REG_SIGN_BIT; u32 inc_val; u16 rl_id; @@ -450,11 +473,73 @@ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn) return 0; } +/* Returns the upper bound for the specified Vport RL parameters. + * link_speed is in Mbps. + * Returns 0 in case of error. + */ +static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type, + u32 link_speed) +{ + switch (vport_rl_type) { + case QM_RL_TYPE_NORMAL: + return QM_INITIAL_VOQ_BYTE_CRD; + case QM_RL_TYPE_QCN: + return QM_GLOBAL_RL_UPPER_BOUND(link_speed); + default: + return 0; + } +} + +/* Prepare VPORT RL runtime init values. + * Return -1 on error. 
+ */ +static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, + u16 start_rl, + u16 num_rls, + u32 link_speed, + struct init_qm_rl_params *rl_params) +{ + u16 i, rl_id; + + if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) { + DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n"); + return -1; + } + + /* Go over all PF VPORTs */ + for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) { + u32 upper_bound, inc_val; + + upper_bound = + qed_get_vport_rl_upper_bound((enum init_qm_rl_type) + rl_params[i].vport_rl_type, + link_speed); + + inc_val = + QM_RL_INC_VAL(rl_params[i].vport_rl ? + rl_params[i].vport_rl : link_speed); + if (inc_val > upper_bound) { + DP_NOTICE(p_hwfn, + "Invalid RL rate - limit configuration\n"); + return -1; + } + + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, + upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, + inc_val); + } + + return 0; +} + /* Prepare Tx PQ mapping runtime init values for the specified PF */ -static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_qm_pf_rt_init_params *p_params, - u32 base_mem_addr_4kb) +static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params, + u32 base_mem_addr_4kb) { u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; struct init_qm_vport_params *vport_params = p_params->vport_params; @@ -487,7 +572,7 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, /* Go over all Tx PQs */ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { u16 *p_first_tx_pq_id, vport_id_in_pf; - struct qm_rf_pq_map_e4 tx_pq_map; + struct qm_rf_pq_map tx_pq_map; u8 tc_id = pq_params[i].tc_id; bool is_vf_pq; u8 ext_voq; @@ -504,8 +589,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id]; if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) { u32 map_val = - (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | - (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT); + (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) | + (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT); /* Create new VP PQ */ *p_first_tx_pq_id = pq_id; @@ -520,7 +605,6 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, /* Prepare PQ map entry */ QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, - E4, pq_id, *p_first_tx_pq_id, pq_params[i].rl_valid, @@ -570,6 +654,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]); + + return 0; } /* Prepare Other PQ mapping runtime init values for the specified PF */ @@ -620,7 +706,6 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, * Return -1 on error. 
*/ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, - struct qed_qm_pf_rt_init_params *p_params) { u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; @@ -629,8 +714,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, u8 ext_voq; u16 i; - inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); - if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq); + if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } @@ -652,7 +737,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, - QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); + QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, inc_val); @@ -689,34 +774,38 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, u16 num_vports, struct init_qm_vport_params *vport_params) { - u16 vport_pq_id, i; + u16 vport_pq_id, wfq, i; u32 inc_val; u8 tc; /* Go over all PF VPORTs */ for (i = 0; i < num_vports; i++) { - if (!vport_params[i].wfq) - continue; - - inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq); - if (inc_val > QM_WFQ_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, - "Invalid VPORT WFQ weight configuration\n"); - return -1; - } - /* Each VPORT can have several VPORT PQ IDs for various TCs */ for (tc = 0; tc < NUM_OF_TCS; tc++) { + /* Check if VPORT/TC is valid */ vport_pq_id = vport_params[i].first_tx_pq_id[tc]; - if (vport_pq_id != QM_INVALID_PQ_ID) { - STORE_RT_REG(p_hwfn, - QM_REG_WFQVPCRD_RT_OFFSET + - vport_pq_id, - (u32)QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, - QM_REG_WFQVPWEIGHT_RT_OFFSET + - vport_pq_id, inc_val); + if (vport_pq_id == QM_INVALID_PQ_ID) + continue; + + /* Find WFQ weight (per VPORT or per VPORT+TC) */ + wfq = vport_params[i].wfq; + wfq = wfq ? wfq : vport_params[i].tc_wfq[tc]; + inc_val = QM_VP_WFQ_INC_VAL(wfq); + if (inc_val > QM_VP_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, + "Invalid VPORT WFQ weight configuration\n"); + return -1; } + + /* Config registers */ + STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + + vport_pq_id, + (u32)QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET + + vport_pq_id, + inc_val | QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + + vport_pq_id, inc_val); } } @@ -780,11 +869,14 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ, QM_OPPOR_LINE_VOQ_DEF); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN); - SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en); - SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en); - SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, + p_params->pf_wfq_en ? 1 : 0); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, + p_params->vport_wfq_en ? 1 : 0); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, + p_params->pf_rl_en ? 1 : 0); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, - p_params->global_rl_en); + p_params->global_rl_en ? 
1 : 0); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF); @@ -830,7 +922,6 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, u16 i; u8 tc; - /* Clear first Tx PQ ID array for each VPORT */ for (i = 0; i < p_params->num_vports; i++) for (tc = 0; tc < NUM_OF_TCS; tc++) @@ -843,7 +934,8 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, p_params->num_tids, 0); /* Map Tx PQs */ - qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb); + if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb)) + return -1; /* Init PF WFQ */ if (p_params->pf_wfq) @@ -858,15 +950,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) return -1; + /* Set VPORT RL */ + if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl, + p_params->num_rls, p_params->link_speed, + p_params->rl_params)) + return -1; + return 0; } int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq) { - u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); + u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq); - if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } @@ -897,41 +995,66 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq) { + int result = 0; u16 vport_pq_id; - u32 inc_val; u8 tc; - inc_val = QM_WFQ_INC_VAL(wfq); - if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + for (tc = 0; tc < NUM_OF_TCS && !result; tc++) { + vport_pq_id = first_tx_pq_id[tc]; + if (vport_pq_id != QM_INVALID_PQ_ID) + result = qed_init_vport_tc_wfq(p_hwfn, p_ptt, + vport_pq_id, wfq); + } + + return result; +} + +int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 first_tx_pq_id, u16 wfq) +{ + u32 inc_val; + + if (first_tx_pq_id == QM_INVALID_PQ_ID) + return -1; + + inc_val = QM_VP_WFQ_INC_VAL(wfq); + if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n"); return -1; } - /* A VPORT can have several VPORT PQ IDs for various TCs */ - for (tc = 0; tc < NUM_OF_TCS; tc++) { - vport_pq_id = first_tx_pq_id[tc]; - if (vport_pq_id != QM_INVALID_PQ_ID) - qed_wr(p_hwfn, - p_ptt, - QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val); - } + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4, + (u32)QM_WFQ_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4, + inc_val | QM_WFQ_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4, + inc_val); return 0; } int qed_init_global_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit) + struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit, + enum init_qm_rl_type vport_rl_type) { - u32 inc_val; + u32 inc_val, upper_bound; + upper_bound = + (vport_rl_type == + QM_RL_TYPE_QCN) ? 
QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) : + QM_INITIAL_VOQ_BYTE_CRD; inc_val = QM_RL_INC_VAL(rate_limit); - if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) { - DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n"); + if (inc_val > upper_bound) { + DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n"); return -1; } qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, + p_ptt, + QM_REG_RLGLBLUPPERBOUND + rl_id * 4, + upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val); return 0; @@ -1013,7 +1136,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, __le32 *p_data, u32 addr, u32 len_in_dwords) { - struct qed_dmae_params params = {}; + struct qed_dmae_params params = { 0 }; u32 *data_cpu; int rc; @@ -1066,16 +1189,16 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = - qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. */ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } @@ -1099,18 +1222,20 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE, + eth_gre_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE, + ip_gre_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = - qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. 
*/ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } @@ -1148,22 +1273,23 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, bool eth_geneve_enable, bool ip_geneve_enable) { u32 reg_val; - u8 shift; /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE, + eth_geneve_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE, + ip_geneve_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = - qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. */ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } @@ -1179,16 +1305,16 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, /* Update DORQ registers */ qed_wr(p_hwfn, p_ptt, - DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5, + DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2, eth_geneve_enable ? 1 : 0); qed_wr(p_hwfn, p_ptt, - DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, + DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2, ip_geneve_enable ? 1 : 0); } #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3 -#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872 +#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910 void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool enable) @@ -1208,7 +1334,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, /* update PRS FIC register */ qed_wr(p_hwfn, p_ptt, - PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT); } else { /* clear VXLAN_NO_L2_ENABLE flag */ @@ -1229,7 +1355,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) { - struct regpair ram_line = { }; + struct regpair ram_line = { 0 }; /* Disable gft search for PF */ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0); @@ -1621,6 +1747,8 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, storm_buf_size = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_BUF_SIZE); storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID); + if (storm_id >= NUM_STORMS) + break; storm_mem_desc = allocated_mem + storm_id; storm_mem_desc->size = storm_buf_size * sizeof(u32); @@ -1645,7 +1773,7 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, /* If memory allocation has failed, free all allocated memory */ if (buf_offset < buf_size) { - qed_fw_overlay_mem_free(p_hwfn, allocated_mem); + qed_fw_overlay_mem_free(p_hwfn, &allocated_mem); return NULL; } @@ -1679,16 +1807,16 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, } void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, - struct phys_mem_desc *fw_overlay_mem) + struct phys_mem_desc **fw_overlay_mem) { u8 storm_id; - if (!fw_overlay_mem) + if (!fw_overlay_mem || !(*fw_overlay_mem)) return; for (storm_id = 0; storm_id < NUM_STORMS; 
storm_id++) { struct phys_mem_desc *storm_mem_desc = - (struct phys_mem_desc *)fw_overlay_mem + storm_id; + (struct phys_mem_desc *)*fw_overlay_mem + storm_id; /* Free Storm's physical memory */ if (storm_mem_desc->virt_addr) @@ -1699,5 +1827,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, } /* Free allocated virtual memory */ - kfree(fw_overlay_mem); + kfree(*fw_overlay_mem); + *fw_overlay_mem = NULL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 7e6c6389523b..b3bf9899c1a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -15,6 +15,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" +#include "qed_iro_hsi.h" #include "qed_reg_addr.h" #include "qed_sriov.h" @@ -46,30 +47,32 @@ static u32 pxp_global_win[] = { /* IRO Array */ static const u32 iro_arr[] = { 0x00000000, 0x00000000, 0x00080000, + 0x00004478, 0x00000008, 0x00080000, 0x00003288, 0x00000088, 0x00880000, - 0x000058e8, 0x00000020, 0x00200000, + 0x000058a8, 0x00000020, 0x00200000, + 0x00003188, 0x00000008, 0x00080000, 0x00000b00, 0x00000008, 0x00040000, 0x00000a80, 0x00000008, 0x00040000, 0x00000000, 0x00000008, 0x00020000, 0x00000080, 0x00000008, 0x00040000, 0x00000084, 0x00000008, 0x00020000, - 0x00005718, 0x00000004, 0x00040000, - 0x00004dd0, 0x00000000, 0x00780000, + 0x00005798, 0x00000004, 0x00040000, + 0x00004e50, 0x00000000, 0x00780000, 0x00003e40, 0x00000000, 0x00780000, - 0x00004480, 0x00000000, 0x00780000, + 0x00004500, 0x00000000, 0x00780000, 0x00003210, 0x00000000, 0x00780000, 0x00003b50, 0x00000000, 0x00780000, 0x00007f58, 0x00000000, 0x00780000, - 0x00005f58, 0x00000000, 0x00080000, + 0x00005fd8, 0x00000000, 0x00080000, 0x00007100, 0x00000000, 0x00080000, - 0x0000aea0, 0x00000000, 0x00080000, + 0x0000af20, 0x00000000, 0x00080000, 0x00004398, 0x00000000, 0x00080000, 0x0000a5a0, 0x00000000, 0x00080000, 0x0000bde8, 0x00000000, 0x00080000, 0x00000020, 0x00000004, 0x00040000, - 0x000056c8, 0x00000010, 0x00100000, + 0x00005688, 0x00000010, 0x00100000, 0x0000c210, 0x00000030, 0x00300000, - 0x0000b088, 0x00000038, 0x00380000, + 0x0000b108, 0x00000038, 0x00380000, 0x00003d20, 0x00000080, 0x00400000, 0x0000bf60, 0x00000000, 0x00040000, 0x00004560, 0x00040080, 0x00040000, @@ -77,11 +80,11 @@ static const u32 iro_arr[] = { 0x00003d60, 0x00000080, 0x00200000, 0x00008960, 0x00000040, 0x00300000, 0x0000e840, 0x00000060, 0x00600000, - 0x00004618, 0x00000080, 0x00380000, - 0x00010738, 0x000000c0, 0x00c00000, + 0x00004698, 0x00000080, 0x00380000, + 0x000107b8, 0x000000c0, 0x00c00000, 0x000001f8, 0x00000002, 0x00020000, - 0x0000a2a0, 0x00000000, 0x01080000, - 0x0000a3a8, 0x00000008, 0x00080000, + 0x0000a260, 0x00000000, 0x01080000, + 0x0000a368, 0x00000008, 0x00080000, 0x000001c0, 0x00000008, 0x00080000, 0x000001f8, 0x00000008, 0x00080000, 0x00000ac0, 0x00000008, 0x00080000, @@ -90,39 +93,46 @@ static const u32 iro_arr[] = { 0x00000280, 0x00000008, 0x00080000, 0x00000680, 0x00080018, 0x00080000, 0x00000b78, 0x00080018, 0x00020000, - 0x0000c640, 0x00000050, 0x003c0000, - 0x00012038, 0x00000018, 0x00100000, - 0x00011b00, 0x00000040, 0x00180000, - 0x000095d0, 0x00000050, 0x00200000, + 0x0000c600, 0x00000058, 0x003c0000, + 0x00012038, 0x00000020, 0x00100000, + 0x00011b00, 0x00000048, 0x00180000, + 0x00009650, 0x00000050, 0x00200000, 0x00008b10, 0x00000040, 0x00280000, - 0x00011640, 0x00000018, 0x00100000, - 0x0000c828, 0x00000048, 0x00380000, - 0x00011710, 0x00000020, 
0x00200000, - 0x00004650, 0x00000080, 0x00100000, + 0x000116c0, 0x00000018, 0x00100000, + 0x0000c808, 0x00000048, 0x00380000, + 0x00011790, 0x00000020, 0x00200000, + 0x000046d0, 0x00000080, 0x00100000, 0x00003618, 0x00000010, 0x00100000, - 0x0000a968, 0x00000008, 0x00010000, + 0x0000a9e8, 0x00000008, 0x00010000, 0x000097a0, 0x00000008, 0x00010000, - 0x00011990, 0x00000008, 0x00010000, - 0x0000f018, 0x00000008, 0x00010000, - 0x00012628, 0x00000008, 0x00010000, - 0x00011da8, 0x00000008, 0x00010000, - 0x0000aa78, 0x00000030, 0x00100000, - 0x0000d768, 0x00000028, 0x00280000, - 0x00009a58, 0x00000018, 0x00180000, - 0x00009bd8, 0x00000008, 0x00080000, - 0x00013a18, 0x00000008, 0x00080000, - 0x000126e8, 0x00000018, 0x00180000, - 0x0000e608, 0x00500288, 0x00100000, - 0x00012970, 0x00000138, 0x00280000, + 0x00011a10, 0x00000008, 0x00010000, + 0x0000e9f8, 0x00000008, 0x00010000, + 0x00012648, 0x00000008, 0x00010000, + 0x000121c8, 0x00000008, 0x00010000, + 0x0000af08, 0x00000030, 0x00100000, + 0x0000d748, 0x00000028, 0x00280000, + 0x00009e68, 0x00000018, 0x00180000, + 0x00009fe8, 0x00000008, 0x00080000, + 0x00013ea8, 0x00000008, 0x00080000, + 0x00012f18, 0x00000018, 0x00180000, + 0x0000dfe8, 0x00500288, 0x00100000, + 0x000131a0, 0x00000138, 0x00280000, }; void qed_init_iro_array(struct qed_dev *cdev) { - cdev->iro_arr = iro_arr; + cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET; } void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val) { + if (rt_offset >= RUNTIME_ARRAY_SIZE) { + DP_ERR(p_hwfn, + "Avoid storing %u in rt_data at index %u!\n", + val, rt_offset); + return; + } + p_hwfn->rt_data.init_val[rt_offset] = val; p_hwfn->rt_data.b_valid[rt_offset] = true; } @@ -132,6 +142,14 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, { size_t i; + if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) { + DP_ERR(p_hwfn, + "Avoid storing values in rt_data at indices %u-%u!\n", + rt_offset, + (u32)(rt_offset + size - 1)); + return; + } + for (i = 0; i < size / sizeof(u32); i++) { p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i]; p_hwfn->rt_data.b_valid[rt_offset + i] = true; @@ -175,7 +193,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn, return rc; /* invalidate after writing */ - for (j = i; j < i + segment; j++) + for (j = i; j < (u32)(i + segment); j++) p_valid[j] = false; /* Jump over the entire segment, including invalid entry */ @@ -245,7 +263,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 addr, u32 fill, u32 fill_count) + u32 addr, u32 fill_count) { static u32 zero_buffer[DMAE_MAX_RW_SIZE]; struct qed_dmae_params params = {}; @@ -372,7 +390,7 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, case INIT_SRC_ZEROS: data = le32_to_cpu(p_cmd->args.zeros_count); if (b_must_dmae || (b_can_dmae && (data >= 64))) - rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data); + rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data); else qed_init_fill(p_hwfn, p_ptt, addr, 0, data); break; @@ -419,7 +437,6 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE); - val = qed_rd(p_hwfn, p_ptt, addr); if (poll == INIT_POLL_NONE) @@ -515,8 +532,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, INIT_IF_MODE_OP_CMD_OFFSET); } -static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn, - struct init_if_phase_op *p_cmd, +static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd, u32 phase, u32 
phase_id) { u32 data = le32_to_cpu(p_cmd->phase_data); @@ -563,7 +579,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn, modes); break; case INIT_OP_IF_PHASE: - cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase, + cmd_num += qed_init_cmd_phase(&cmd->if_phase, phase, phase_id); break; case INIT_OP_DELAY: diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index a573c8921982..12e5c4e370d4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -12,23 +12,24 @@ #include "qed.h" /** - * @brief qed_init_iro_array - init iro_arr. + * qed_init_iro_array(): init iro_arr. * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_init_iro_array(struct qed_dev *cdev); /** - * @brief qed_init_run - Run the init-sequence. + * qed_init_run(): Run the init-sequence. * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @phase: Phase. + * @phase_id: Phase ID. + * @modes: Mode. * - * @param p_hwfn - * @param p_ptt - * @param phase - * @param phase_id - * @param modes - * @return _qed_status_t + * Return: _qed_status_t */ int qed_init_run(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -37,30 +38,31 @@ int qed_init_run(struct qed_hwfn *p_hwfn, int modes); /** - * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs. + * qed_init_alloc(): Allocate RT array, Store 'values' ptrs. * + * @p_hwfn: HW device data. * - * @param p_hwfn - * - * @return _qed_status_t + * Return: _qed_status_t. */ int qed_init_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_init_hwfn_deallocate + * qed_init_free(): Init HW function deallocate. * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_init_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_init_store_rt_reg - Store a configuration value in the RT array. + * qed_init_store_rt_reg(): Store a configuration value in the RT array. * + * @p_hwfn: HW device data. + * @rt_offset: RT offset. + * @val: Val. * - * @param p_hwfn - * @param rt_offset - * @param val + * Return: Void. */ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, @@ -72,29 +74,21 @@ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, #define OVERWRITE_RT_REG(hwfn, offset, val) \ qed_init_store_rt_reg(hwfn, offset, val) -/** - * @brief - * - * - * @param p_hwfn - * @param rt_offset - * @param val - * @param size - */ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 *val, size_t size); #define STORE_RT_REG_AGG(hwfn, offset, val) \ - qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val)) + qed_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val)) /** - * @brief - * Initialize GTT global windows and set admin window - * related params of GTT/PTT to default values. + * qed_gtt_init(): Initialize GTT global windows and set admin window + * related params of GTT/PTT to default values. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return Void. 
*/ void qed_gtt_init(struct qed_hwfn *p_hwfn); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index f78e6055f654..a97f691839e0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -36,7 +36,7 @@ struct qed_sb_sp_info { struct qed_sb_info sb_info; /* per protocol index data */ - struct qed_pi_info pi_info_arr[PIS_PER_SB_E4]; + struct qed_pi_info pi_info_arr[PIS_PER_SB]; }; enum qed_attention_type { @@ -1507,7 +1507,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, else SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1); - sb_offset = igu_sb_id * PIS_PER_SB_E4; + sb_offset = igu_sb_id * PIS_PER_SB; pi_offset = sb_offset + pi_index; if (p_hwfn->hw_init_done) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index c5550e96bbe1..84c17e97f569 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -53,51 +53,54 @@ enum qed_coalescing_fsm { }; /** - * @brief qed_int_igu_enable_int - enable device interrupts + * qed_int_igu_enable_int(): Enable device interrupts. * - * @param p_hwfn - * @param p_ptt - * @param int_mode - interrupt mode to use + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrupt mode to use. + * + * Return: Void. */ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode); /** - * @brief qed_int_igu_disable_int - disable device interrupts + * qed_int_igu_disable_int(): Disable device interrupts. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc - * register from igu. + * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc + * register from igu. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u64 + * Return: u64. */ u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn); #define QED_SP_SB_ID 0xffff /** - * @brief qed_int_sb_init - Initializes the sb_info structure. + * qed_int_sb_init(): Initializes the sb_info structure. * - * once the structure is initialized it can be passed to sb related functions. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: points to an uninitialized (but allocated) sb_info structure + * @sb_virt_addr: SB Virtual address. + * @sb_phy_addr: SB Physial address. + * @sb_id: the sb_id to be used (zero based in driver) + * should use QED_SP_SB_ID for SP Status block * - * @param p_hwfn - * @param p_ptt - * @param sb_info points to an uninitialized (but - * allocated) sb_info structure - * @param sb_virt_addr - * @param sb_phy_addr - * @param sb_id the sb_id to be used (zero based in driver) - * should use QED_SP_SB_ID for SP Status block + * Return: int. * - * @return int + * Once the structure is initialized it can be passed to sb related functions. */ int qed_int_sb_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -106,82 +109,91 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, dma_addr_t sb_phy_addr, u16 sb_id); /** - * @brief qed_int_sb_setup - Setup the sb. + * qed_int_sb_setup(): Setup the sb. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: Initialized sb_info structure. * - * @param p_hwfn - * @param p_ptt - * @param sb_info initialized sb_info structure + * Return: Void. 
*/ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info); /** - * @brief qed_int_sb_release - releases the sb_info structure. + * qed_int_sb_release(): Releases the sb_info structure. * - * once the structure is released, it's memory can be freed + * @p_hwfn: HW device data. + * @sb_info: Points to an allocated sb_info structure. + * @sb_id: The sb_id to be used (zero based in driver) + * should never be equal to QED_SP_SB_ID + * (SP Status block). * - * @param p_hwfn - * @param sb_info points to an allocated sb_info structure - * @param sb_id the sb_id to be used (zero based in driver) - * should never be equal to QED_SP_SB_ID - * (SP Status block) + * Return: int. * - * @return int + * Once the structure is released, it's memory can be freed. */ int qed_int_sb_release(struct qed_hwfn *p_hwfn, struct qed_sb_info *sb_info, u16 sb_id); /** - * @brief qed_int_sp_dpc - To be called when an interrupt is received on the - * default status block. + * qed_int_sp_dpc(): To be called when an interrupt is received on the + * default status block. * - * @param p_hwfn - pointer to hwfn + * @t: Tasklet. + * + * Return: Void. * */ void qed_int_sp_dpc(struct tasklet_struct *t); /** - * @brief qed_int_get_num_sbs - get the number of status - * blocks configured for this funciton in the igu. + * qed_int_get_num_sbs(): Get the number of status blocks configured + * for this funciton in the igu. * - * @param p_hwfn - * @param p_sb_cnt_info + * @p_hwfn: HW device data. + * @p_sb_cnt_info: Pointer to SB count info. * - * @return int - number of status blocks configured + * Return: Void. */ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, struct qed_sb_cnt_info *p_sb_cnt_info); /** - * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR + * qed_int_disable_post_isr_release(): Performs the cleanup post ISR * release. The API need to be called after releasing all slowpath IRQs * of the device. * - * @param cdev + * @cdev: Qed dev pointer. * + * Return: Void. */ void qed_int_disable_post_isr_release(struct qed_dev *cdev); /** - * @brief qed_int_attn_clr_enable - sets whether the general behavior is + * qed_int_attn_clr_enable: Sets whether the general behavior is * preventing attentions from being reasserted, or following the * attributes of the specific attention. * - * @param cdev - * @param clr_enable + * @cdev: Qed dev pointer. + * @clr_enable: Clear enable + * + * Return: Void. * */ void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable); /** - * @brief - Doorbell Recovery handler. + * qed_db_rec_handler(): Doorbell Recovery handler. * Run doorbell recovery in case of PF overflow (and flush DORQ if * needed). * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -192,7 +204,7 @@ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_SB_EVENT_MASK 0x0003 #define SB_ALIGNED_SIZE(p_hwfn) \ - ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn) + ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) #define QED_SB_INVALID_IDX 0xffff @@ -223,30 +235,34 @@ struct qed_igu_info { }; /** - * @brief - Make sure the IGU CAM reflects the resources provided by MFW + * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources + * provided by MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. 
*/ int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Translate the weakly-defined client sb-id into an IGU sb-id + * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into + * an IGU sb-id * - * @param p_hwfn - * @param sb_id - user provided sb_id + * @p_hwfn: HW device data. + * @sb_id: user provided sb_id. * - * @return an index inside IGU CAM where the SB resides + * Return: An index inside IGU CAM where the SB resides. */ u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); /** - * @brief return a pointer to an unused valid SB + * qed_get_igu_free_sb(): Return a pointer to an unused valid SB * - * @param p_hwfn - * @param b_is_pf - true iff we want a SB belonging to a PF + * @p_hwfn: HW device data. + * @b_is_pf: True iff we want a SB belonging to a PF. * - * @return point to an igu_block, NULL if none is available + * Return: Point to an igu_block, NULL if none is available. */ struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf); @@ -259,15 +275,15 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn); /** - * @brief qed_int_igu_read_cam - Reads the IGU CAM. + * qed_int_igu_read_cam(): Reads the IGU CAM. * This function needs to be called during hardware * prepare. It reads the info from igu cam to know which * status block is the default / base status block etc. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -275,24 +291,22 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn, void *cookie); /** - * @brief qed_int_register_cb - Register callback func for - * slowhwfn statusblock. - * - * Every protocol that uses the slowhwfn status block - * should register a callback function that will be called - * once there is an update of the sp status block. - * - * @param p_hwfn - * @param comp_cb - function to be called when there is an - * interrupt on the sp sb - * - * @param cookie - passed to the callback function - * @param sb_idx - OUT parameter which gives the chosen index - * for this protocol. - * @param p_fw_cons - pointer to the actual address of the - * consumer for this protocol. - * - * @return int + * qed_int_register_cb(): Register callback func for slowhwfn statusblock. + * + * @p_hwfn: HW device data. + * @comp_cb: Function to be called when there is an + * interrupt on the sp sb + * @cookie: Passed to the callback function + * @sb_idx: (OUT) parameter which gives the chosen index + * for this protocol. + * @p_fw_cons: Pointer to the actual address of the + * consumer for this protocol. + * + * Return: Int. + * + * Every protocol that uses the slowhwfn status block + * should register a callback function that will be called + * once there is an update of the sp status block. */ int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_comp_cb_t comp_cb, @@ -301,37 +315,40 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn, __le16 **p_fw_cons); /** - * @brief qed_int_unregister_cb - Unregisters callback - * function from sp sb. - * Partner of qed_int_register_cb -> should be called - * when no longer required. + * qed_int_unregister_cb(): Unregisters callback function from sp sb. + * + * @p_hwfn: HW device data. + * @pi: Producer Index. * - * @param p_hwfn - * @param pi + * Return: Int. 
* - * @return int + * Partner of qed_int_register_cb -> should be called + * when no longer required. */ int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi); /** - * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id. + * qed_int_get_sp_sb_id(): Get the slowhwfn sb id. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u16 + * Return: u16. */ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); /** - * @brief Status block cleanup. Should be called for each status - * block that will be used -> both PF / VF - * - * @param p_hwfn - * @param p_ptt - * @param igu_sb_id - igu status block id - * @param opaque - opaque fid of the sb owner. - * @param b_set - set(1) / clear(0) + * qed_int_igu_init_pure_rt_single(): Status block cleanup. + * Should be called for each status + * block that will be used -> both PF / VF. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @igu_sb_id: IGU status block id. + * @opaque: Opaque fid of the sb owner. + * @b_set: Set(1) / Clear(0). + * + * Return: Void. */ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -340,15 +357,16 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, bool b_set); /** - * @brief qed_int_cau_conf - configure cau for a given status - * block - * - * @param p_hwfn - * @param ptt - * @param sb_phys - * @param igu_sb_id - * @param vf_number - * @param vf_valid + * qed_int_cau_conf_sb(): Configure cau for a given status block. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_phys: SB Physical. + * @igu_sb_id: IGU status block id. + * @vf_number: VF number + * @vf_valid: VF valid or not. + * + * Return: Void. */ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -358,52 +376,58 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, u8 vf_valid); /** - * @brief qed_int_alloc + * qed_int_alloc(): QED interrupt alloc. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_int_free + * qed_int_free(): QED interrupt free. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_int_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_int_setup + * qed_int_setup(): QED interrupt setup. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Enable Interrupt & Attention for hw function + * qed_int_igu_enable(): Enable Interrupt & Attention for hw function. * - * @param p_hwfn - * @param p_ptt - * @param int_mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrut mode * - * @return int + * Return: Int. */ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode); /** - * @brief - Initialize CAU status block entry + * qed_init_cau_sb_entry(): Initialize CAU status block entry. + * + * @p_hwfn: HW device data. + * @p_sb_entry: Pointer SB entry. + * @pf_id: PF number + * @vf_number: VF number + * @vf_valid: VF valid or not. * - * @param p_hwfn - * @param p_sb_entry - * @param pf_id - * @param vf_number - * @param vf_valid + * Return: Void. 
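Most of the churn in these headers is mechanical: legacy @brief/@param/@return blocks are rewritten in the kernel-doc form that scripts/kernel-doc can parse, i.e. a "function_name(): summary" line, one "@arg:" line per parameter, and a "Return:" section. A minimal sketch of the target layout on a hypothetical helper (not part of the driver) looks like this:

/**
 * example_ring_size(): Compute the byte size of a descriptor ring.
 *
 * @num_desc: Number of descriptors in the ring.
 * @desc_size: Size of a single descriptor in bytes.
 *
 * Return: Total ring size in bytes.
 */
static unsigned int example_ring_size(unsigned int num_desc,
				      unsigned int desc_size)
{
	return num_desc * desc_size;
}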
*/ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, struct cau_sb_entry *p_sb_entry, diff --git a/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h new file mode 100644 index 000000000000..3ccdd3b1d8cb --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h @@ -0,0 +1,500 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. + */ + +#ifndef _QED_IRO_HSI_H +#define _QED_IRO_HSI_H + +#include <linux/types.h> + +enum { + IRO_YSTORM_FLOW_CONTROL_MODE_GTT, + IRO_PSTORM_PKT_DUPLICATION_CFG, + IRO_TSTORM_PORT_STAT, + IRO_TSTORM_LL2_PORT_STAT, + IRO_TSTORM_PKT_DUPLICATION_CFG, + IRO_USTORM_VF_PF_CHANNEL_READY_GTT, + IRO_USTORM_FLR_FINAL_ACK_GTT, + IRO_USTORM_EQE_CONS_GTT, + IRO_USTORM_ETH_QUEUE_ZONE_GTT, + IRO_USTORM_COMMON_QUEUE_CONS_GTT, + IRO_XSTORM_PQ_INFO, + IRO_XSTORM_INTEG_TEST_DATA, + IRO_YSTORM_INTEG_TEST_DATA, + IRO_PSTORM_INTEG_TEST_DATA, + IRO_TSTORM_INTEG_TEST_DATA, + IRO_MSTORM_INTEG_TEST_DATA, + IRO_USTORM_INTEG_TEST_DATA, + IRO_XSTORM_OVERLAY_BUF_ADDR, + IRO_YSTORM_OVERLAY_BUF_ADDR, + IRO_PSTORM_OVERLAY_BUF_ADDR, + IRO_TSTORM_OVERLAY_BUF_ADDR, + IRO_MSTORM_OVERLAY_BUF_ADDR, + IRO_USTORM_OVERLAY_BUF_ADDR, + IRO_TSTORM_LL2_RX_PRODS_GTT, + IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT, + IRO_CORE_LL2_USTORM_PER_QUEUE_STAT, + IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT, + IRO_MSTORM_QUEUE_STAT, + IRO_MSTORM_TPA_TIMEOUT_US, + IRO_MSTORM_ETH_VF_PRODS, + IRO_MSTORM_ETH_PF_PRODS_GTT, + IRO_MSTORM_ETH_PF_STAT, + IRO_USTORM_QUEUE_STAT, + IRO_USTORM_ETH_PF_STAT, + IRO_PSTORM_QUEUE_STAT, + IRO_PSTORM_ETH_PF_STAT, + IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT, + IRO_TSTORM_ETH_PRS_INPUT, + IRO_ETH_RX_RATE_LIMIT, + IRO_TSTORM_ETH_RSS_UPDATE_GTT, + IRO_XSTORM_ETH_QUEUE_ZONE_GTT, + IRO_YSTORM_TOE_CQ_PROD, + IRO_USTORM_TOE_CQ_PROD, + IRO_USTORM_TOE_GRQ_PROD, + IRO_TSTORM_SCSI_CMDQ_CONS_GTT, + IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT, + IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT, + IRO_TSTORM_ISCSI_RX_STATS, + IRO_MSTORM_ISCSI_RX_STATS, + IRO_USTORM_ISCSI_RX_STATS, + IRO_XSTORM_ISCSI_TX_STATS, + IRO_YSTORM_ISCSI_TX_STATS, + IRO_PSTORM_ISCSI_TX_STATS, + IRO_TSTORM_FCOE_RX_STATS, + IRO_PSTORM_FCOE_TX_STATS, + IRO_PSTORM_RDMA_QUEUE_STAT, + IRO_TSTORM_RDMA_QUEUE_STAT, + IRO_XSTORM_RDMA_ASSERT_LEVEL, + IRO_YSTORM_RDMA_ASSERT_LEVEL, + IRO_PSTORM_RDMA_ASSERT_LEVEL, + IRO_TSTORM_RDMA_ASSERT_LEVEL, + IRO_MSTORM_RDMA_ASSERT_LEVEL, + IRO_USTORM_RDMA_ASSERT_LEVEL, + IRO_XSTORM_IWARP_RXMIT_STATS, + IRO_TSTORM_ROCE_EVENTS_STAT, + IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS, + IRO_YSTORM_ROCE_ERROR_STATS, + IRO_PSTORM_ROCE_DCQCN_SENT_STATS, + IRO_USTORM_ROCE_CQE_STATS, +}; + +/* Pstorm LiteL2 queue statistics */ + +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ + (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].base \ + + ((core_tx_stats_id) * IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].size) + +/* Tstorm LightL2 queue statistics */ +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].base \ + + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].size) + +/* Ustorm LiteL2 queue statistics */ +#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].base \ + + ((core_rx_queue_id) * 
IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].size) + +/* Tstorm Eth limit Rx rate */ +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ + (IRO[IRO_ETH_RX_RATE_LIMIT].base \ + + ((pf_id) * IRO[IRO_ETH_RX_RATE_LIMIT].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[IRO_ETH_RX_RATE_LIMIT].size) + +/* Mstorm ETH PF queues producers */ +#define MSTORM_ETH_PF_PRODS_GTT_OFFSET(queue_id) \ + (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].base \ + + ((queue_id) * IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].m1)) +#define MSTORM_ETH_PF_PRODS_GTT_SIZE (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].size) + +/* Mstorm pf statistics */ +#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_MSTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_MSTORM_ETH_PF_STAT].m1)) +#define MSTORM_ETH_PF_STAT_SIZE (IRO[IRO_MSTORM_ETH_PF_STAT].size) + +/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone + * size mode. + */ +#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ + (IRO[IRO_MSTORM_ETH_VF_PRODS].base \ + + ((vf_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m1) \ + + ((vf_queue_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m2)) +#define MSTORM_ETH_VF_PRODS_SIZE (IRO[IRO_MSTORM_ETH_VF_PRODS].size) + +/* Mstorm Integration Test Data */ +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_MSTORM_INTEG_TEST_DATA].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_MSTORM_INTEG_TEST_DATA].size) + +/* Mstorm iSCSI RX stats */ +#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_MSTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_MSTORM_ISCSI_RX_STATS].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_MSTORM_ISCSI_RX_STATS].size) + +/* Mstorm overlay buffer host address */ +#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].base) +#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].size) + +/* Mstorm queue statistics */ +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_MSTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_MSTORM_QUEUE_STAT].m1)) +#define MSTORM_QUEUE_STAT_SIZ (IRO[IRO_MSTORM_QUEUE_STAT].size) + +/* Mstorm error level for assert */ +#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].size) + +/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ +#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \ + (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].base \ + + ((storage_func_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \ + + ((bdq_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \ + (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].size) + +/* TPA agregation timeout in us resolution (on ASIC) */ +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[IRO_MSTORM_TPA_TIMEOUT_US].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[IRO_MSTORM_TPA_TIMEOUT_US].size) + +/* Control frame's EthType configuration for TX control frame security */ +#define PSTORM_CTL_FRAME_ETHTYPE_GTT_OFFSET(ethtype_id) \ + (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].base \ + + ((ethtype_id) * IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_GTT_SIZE \ + (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].size) + +/* Pstorm pf statistics */ +#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_PSTORM_ETH_PF_STAT].m1)) +#define 
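The new qed_iro_hsi.h replaces hard-coded storm RAM offsets with a table lookup: each *_OFFSET() macro adds the caller's indices, scaled by the per-entry strides m1 (and m2 for two-dimensional entries such as the VF producers or the BDQ external producers), to IRO[idx].base. A self-contained model of that arithmetic follows; the field names base/m1/m2/size mirror what the macros dereference, but the struct layout and the sample values are assumptions for illustration, not the driver's real iro_arr.

#include <stdio.h>

/* Modelled on the fields the *_OFFSET()/*_SIZE macros dereference. */
struct iro_entry {
	unsigned int base;	/* start of the region in storm RAM     */
	unsigned int m1;	/* stride for the first index           */
	unsigned int m2;	/* stride for the second index (if any) */
	unsigned int size;	/* size of one element                  */
};

/* Equivalent of MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id). */
static unsigned int vf_prods_offset(const struct iro_entry *e,
				    unsigned int vf_id, unsigned int vf_queue_id)
{
	return e->base + vf_id * e->m1 + vf_queue_id * e->m2;
}

int main(void)
{
	/* Made-up numbers, purely to show the base + i*m1 + j*m2 shape. */
	struct iro_entry vf_prods = { .base = 0x4000, .m1 = 64, .m2 = 8, .size = 8 };

	printf("VF 3, queue 2 -> 0x%x\n", vf_prods_offset(&vf_prods, 3, 2));
	return 0;
}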
PSTORM_ETH_PF_STAT_SIZE (IRO[IRO_PSTORM_ETH_PF_STAT].size) + +/* Pstorm FCoE TX stats */ +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_FCOE_TX_STATS].base \ + + ((pf_id) * IRO[IRO_PSTORM_FCOE_TX_STATS].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[IRO_PSTORM_FCOE_TX_STATS].size) + +/* Pstorm Integration Test Data */ +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_PSTORM_INTEG_TEST_DATA].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_PSTORM_INTEG_TEST_DATA].size) + +/* Pstorm iSCSI TX stats */ +#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_PSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_PSTORM_ISCSI_TX_STATS].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_PSTORM_ISCSI_TX_STATS].size) + +/* Pstorm overlay buffer host address */ +#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].base) +#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].size) + +/* Pstorm LL2 packet duplication configuration. Use pstorm_pkt_dup_cfg + * data type. + */ +#define PSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].base \ + + ((pf_id) * IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].m1)) +#define PSTORM_PKT_DUPLICATION_CFG_SIZE \ + (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].size) + +/* Pstorm queue statistics */ +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_PSTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_PSTORM_QUEUE_STAT].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_QUEUE_STAT].size) + +/* Pstorm error level for assert */ +#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].size) + +/* Pstorm RDMA queue statistics */ +#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].base \ + + ((rdma_stat_counter_id) * IRO[IRO_PSTORM_RDMA_QUEUE_STAT].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].size) + +/* DCQCN Sent Statistics */ +#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].base \ + + ((roce_pf_id) * IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE \ + (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].size) + +/* Tstorm last parser message */ +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[IRO_TSTORM_ETH_PRS_INPUT].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[IRO_TSTORM_ETH_PRS_INPUT].size) + +/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. 
+ */ +#define TSTORM_ETH_RSS_UPDATE_GTT_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].base \ + + ((pf_id) * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].m1)) +#define TSTORM_ETH_RSS_UPDATE_GTT_SIZE\ + (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].size) + +/* Tstorm FCoE RX stats */ +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_FCOE_RX_STATS].base \ + + ((pf_id) * IRO[IRO_TSTORM_FCOE_RX_STATS].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[IRO_TSTORM_FCOE_RX_STATS].size) + +/* Tstorm Integration Test Data */ +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_TSTORM_INTEG_TEST_DATA].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_TSTORM_INTEG_TEST_DATA].size) + +/* Tstorm iSCSI RX stats */ +#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_TSTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_TSTORM_ISCSI_RX_STATS].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_TSTORM_ISCSI_RX_STATS].size) + +/* Tstorm ll2 port statistics */ +#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ + (IRO[IRO_TSTORM_LL2_PORT_STAT].base \ + + ((port_id) * IRO[IRO_TSTORM_LL2_PORT_STAT].m1)) +#define TSTORM_LL2_PORT_STAT_SIZE (IRO[IRO_TSTORM_LL2_PORT_STAT].size) + +/* Tstorm producers */ +#define TSTORM_LL2_RX_PRODS_GTT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].base \ + + ((core_rx_queue_id) * IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].m1)) +#define TSTORM_LL2_RX_PRODS_GTT_SIZE (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].size) + +/* Tstorm overlay buffer host address */ +#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].base) + +#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].size) + +/* Tstorm LL2 packet duplication configuration. + * Use tstorm_pkt_dup_cfg data type. + */ +#define TSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].base \ + + ((pf_id) * IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].m1)) +#define TSTORM_PKT_DUPLICATION_CFG_SIZE \ + (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].size) + +/* Tstorm port statistics */ +#define TSTORM_PORT_STAT_OFFSET(port_id) \ + (IRO[IRO_TSTORM_PORT_STAT].base \ + + ((port_id) * IRO[IRO_TSTORM_PORT_STAT].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[IRO_TSTORM_PORT_STAT].size) + +/* Tstorm error level for assert */ +#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].size) + +/* Tstorm RDMA queue statistics */ +#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].base \ + + ((rdma_stat_counter_id) * IRO[IRO_TSTORM_RDMA_QUEUE_STAT].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].size) + +/* Tstorm RoCE Event Statistics */ +#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ + (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].base \ + + ((roce_pf_id) * IRO[IRO_TSTORM_ROCE_EVENTS_STAT].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].size) + +/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, + * BDqueue-id. 
+ */ +#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \ + (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].base \ + + ((storage_func_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \ + + ((bdq_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \ + (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].size) + +/* Tstorm cmdq-cons of given command queue-id */ +#define TSTORM_SCSI_CMDQ_CONS_GTT_OFFSET(cmdq_queue_id) \ + (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].base \ + + ((cmdq_queue_id) * IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].m1)) +#define TSTORM_SCSI_CMDQ_CONS_GTT_SIZE \ + (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].size) + +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_GTT_OFFSET(queue_zone_id) \ + (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].base \ + + ((queue_zone_id) * IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].m1)) +#define USTORM_COMMON_QUEUE_CONS_GTT_SIZE \ + (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].size) + +/* Ustorm Event ring consumer */ +#define USTORM_EQE_CONS_GTT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_EQE_CONS_GTT].base \ + + ((pf_id) * IRO[IRO_USTORM_EQE_CONS_GTT].m1)) +#define USTORM_EQE_CONS_GTT_SIZE (IRO[IRO_USTORM_EQE_CONS_GTT].size) + +/* Ustorm pf statistics */ +#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_USTORM_ETH_PF_STAT].m1)) +#define USTORM_ETH_PF_STAT_SIZE (IRO[IRO_USTORM_ETH_PF_STAT].size) + +/* Ustorm eth queue zone */ +#define USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_zone_id) \ + (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].base \ + + ((queue_zone_id) * IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].m1)) +#define USTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].size) + +/* Ustorm Final flr cleanup ack */ +#define USTORM_FLR_FINAL_ACK_GTT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].base \ + + ((pf_id) * IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].m1)) +#define USTORM_FLR_FINAL_ACK_GTT_SIZE (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].size) + +/* Ustorm Integration Test Data */ +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_USTORM_INTEG_TEST_DATA].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_USTORM_INTEG_TEST_DATA].size) + +/* Ustorm iSCSI RX stats */ +#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_USTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_USTORM_ISCSI_RX_STATS].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_USTORM_ISCSI_RX_STATS].size) + +/* Ustorm overlay buffer host address */ +#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].base) +#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].size) + +/* Ustorm queue statistics */ +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_USTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_USTORM_QUEUE_STAT].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[IRO_USTORM_QUEUE_STAT].size) + +/* Ustorm error level for assert */ +#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].size) + +/* RoCE CQEs Statistics */ +#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_USTORM_ROCE_CQE_STATS].base \ + + ((roce_pf_id) * IRO[IRO_USTORM_ROCE_CQE_STATS].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[IRO_USTORM_ROCE_CQE_STATS].size) + +/* Ustorm cqe producer */ +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[IRO_USTORM_TOE_CQ_PROD].base \ + + ((rss_id) * 
IRO[IRO_USTORM_TOE_CQ_PROD].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[IRO_USTORM_TOE_CQ_PROD].size) + +/* Ustorm grq producer */ +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ + (IRO[IRO_USTORM_TOE_GRQ_PROD].base \ + + ((pf_id) * IRO[IRO_USTORM_TOE_GRQ_PROD].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[IRO_USTORM_TOE_GRQ_PROD].size) + +/* Ustorm VF-PF Channel ready flag */ +#define USTORM_VF_PF_CHANNEL_READY_GTT_OFFSET(vf_id) \ + (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].base \ + + ((vf_id) * IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].m1)) +#define USTORM_VF_PF_CHANNEL_READY_GTT_SIZE \ + (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].size) + +/* Xstorm queue zone */ +#define XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_id) \ + (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].base \ + + ((queue_id) * IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].m1)) +#define XSTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].size) + +/* Xstorm Integration Test Data */ +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_XSTORM_INTEG_TEST_DATA].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_XSTORM_INTEG_TEST_DATA].size) + +/* Xstorm iSCSI TX stats */ +#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_XSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_XSTORM_ISCSI_TX_STATS].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_XSTORM_ISCSI_TX_STATS].size) + +/* Xstorm iWARP rxmit stats */ +#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ + (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].base \ + + ((pf_id) * IRO[IRO_XSTORM_IWARP_RXMIT_STATS].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].size) + +/* Xstorm overlay buffer host address */ +#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].base) +#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].size) + +/* Xstorm common PQ info */ +#define XSTORM_PQ_INFO_OFFSET(pq_id) \ + (IRO[IRO_XSTORM_PQ_INFO].base \ + + ((pq_id) * IRO[IRO_XSTORM_PQ_INFO].m1)) +#define XSTORM_PQ_INFO_SIZE (IRO[IRO_XSTORM_PQ_INFO].size) + +/* Xstorm error level for assert */ +#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].size) + +/* Ystorm flow control mode. 
Use enum fw_flow_ctrl_mode */ +#define YSTORM_FLOW_CONTROL_MODE_GTT_OFFSET \ + (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].base) +#define YSTORM_FLOW_CONTROL_MODE_GTT_SIZE \ + (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].size) + +/* Ystorm Integration Test Data */ +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_YSTORM_INTEG_TEST_DATA].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_YSTORM_INTEG_TEST_DATA].size) + +/* Ystorm iSCSI TX stats */ +#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_YSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_YSTORM_ISCSI_TX_STATS].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_YSTORM_ISCSI_TX_STATS].size) + +/* Ystorm overlay buffer host address */ +#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].base) +#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].size) + +/* Ystorm error level for assert */ +#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].size) + +/* DCQCN Received Statistics */ +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].base \ + + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE \ + (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].size) + +/* RoCE Error Statistics */ +#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_YSTORM_ROCE_ERROR_STATS].base \ + + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_ERROR_STATS].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[IRO_YSTORM_ROCE_ERROR_STATS].size) + +/* Ystorm cqe producer */ +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[IRO_YSTORM_TOE_CQ_PROD].base \ + + ((rss_id) * IRO[IRO_YSTORM_TOE_CQ_PROD].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[IRO_YSTORM_TOE_CQ_PROD].size) + +/* Per-chip offsets in iro_arr in dwords */ +#define E4_IRO_ARR_OFFSET 0 +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index db926d8b3033..511ab214eb9c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -29,6 +29,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_iro_hsi.h" #include "qed_iscsi.h" #include "qed_ll2.h" #include "qed_mcp.h" @@ -627,10 +628,9 @@ static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; @@ -642,10 +642,9 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_TSDM_RAM + - TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index dab7a5d09f87..dec2b00259d4 100644 --- 
a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -34,10 +34,13 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn); void qed_iscsi_free(struct qed_hwfn *p_hwfn); /** - * @brief - Fills provided statistics struct with statistics. + * qed_get_protocol_stats_iscsi(): Fills provided statistics + * struct with statistics. * - * @param cdev - * @param stats - points to struct that will be filled with statistics. + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * + * Return: Void. */ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 186d0048a9d1..1d1d4caad680 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -114,6 +114,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) + p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; + p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT); + p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT); p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT; return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index dfaf10edfabf..2edd6bf64a3c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -28,6 +28,7 @@ #include "qed_dev_api.h" #include <linux/qed/qed_eth_if.h> #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_l2.h" @@ -37,7 +38,6 @@ #include "qed_sp.h" #include "qed_sriov.h" - #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 @@ -904,9 +904,10 @@ qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn, { u32 init_prod_val = 0; - *pp_prod = p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); + *pp_prod = (u8 __iomem *) + p_hwfn->regview + + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), @@ -1111,7 +1112,6 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn, { int rc; - rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr, pbl_size, qed_get_cm_pq_idx_mcos(p_hwfn, tc)); @@ -2010,7 +2010,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_cb, struct qed_ntuple_filter_params *p_params) { - struct rx_update_gft_filter_data *p_ramrod = NULL; + struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u16 abs_rx_q_id = 0; @@ -2031,7 +2031,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, } rc = qed_sp_init_request(p_hwfn, &p_ent, - ETH_RAMROD_GFT_UPDATE_FILTER, + ETH_RAMROD_RX_UPDATE_GFT_FILTER, PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -2100,7 +2100,7 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, CAU_SB_ENTRY_TIMER_RES0); address = BAR0_MAP_REG_USDM_RAM + - USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); coalesce = qed_rd(p_hwfn, p_ptt, address); is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); @@ -2134,7 +2134,7 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, CAU_SB_ENTRY_TIMER_RES1); address = BAR0_MAP_REG_XSDM_RAM + - 
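The producer-address calculations in qed_iscsi.c and qed_l2.c now go through GET_GTT_BDQ_REG_ADDR()/GET_GTT_REG_ADDR() instead of adding the *_OFFSET() macro to the GTT window by hand. The helpers are defined in a header that is not part of this diff; judging from the call sites, a plausible shape is token-pasting the symbolic name onto the *_GTT_OFFSET macros introduced in qed_iro_hsi.h. Treat the definitions below as an illustrative reconstruction, not the driver's actual code.

/* Assumed shape, inferred from the call sites above. */
#define GET_GTT_REG_ADDR(base, name, idx) \
	((base) + name##_GTT_OFFSET(idx))

#define GET_GTT_BDQ_REG_ADDR(base, name, func, bdq_id) \
	((base) + name##_GTT_OFFSET(func, bdq_id))

/*
 * Under this assumption the qed_l2.c hunk expands back to
 * GTT_BAR0_MAP_REG_MSDM_RAM + MSTORM_ETH_PF_PRODS_GTT_OFFSET(queue_id),
 * i.e. the same producer address as before, now spelled through one helper.
 */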
XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); coalesce = qed_rd(p_hwfn, p_ptt, address); is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); @@ -2763,25 +2763,6 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev, return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); } -static int qed_configure_filter(struct qed_dev *cdev, - struct qed_filter_params *params) -{ - enum qed_filter_rx_mode_type accept_flags; - - switch (params->type) { - case QED_FILTER_TYPE_UCAST: - return qed_configure_filter_ucast(cdev, ¶ms->filter.ucast); - case QED_FILTER_TYPE_MCAST: - return qed_configure_filter_mcast(cdev, ¶ms->filter.mcast); - case QED_FILTER_TYPE_RX_MODE: - accept_flags = params->filter.accept_flags; - return qed_configure_filter_rx_mode(cdev, accept_flags); - default: - DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type); - return -EINVAL; - } -} - static int qed_configure_arfs_searcher(struct qed_dev *cdev, enum qed_filter_config_mode mode) { @@ -2867,7 +2848,7 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, cqe); } -static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac) +static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac) { int i, ret; @@ -2904,7 +2885,9 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .q_rx_stop = &qed_stop_rxq, .q_tx_start = &qed_start_txq, .q_tx_stop = &qed_stop_txq, - .filter_config = &qed_configure_filter, + .filter_config_rx_mode = &qed_configure_filter_rx_mode, + .filter_config_ucast = &qed_configure_filter_ucast, + .filter_config_mcast = &qed_configure_filter_mcast, .fastpath_stop = &qed_fastpath_stop, .eth_cqe_completion = &qed_fp_cqe_completion, .get_vport_stats = &qed_get_vport_stats, diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8eceeebb1a7b..a538cf478c14 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -92,18 +92,18 @@ struct qed_filter_mcast { }; /** - * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue + * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue. * - * @param p_hwfn - * @param p_rxq Handler of queue to close - * @param eq_completion_only If True completion will be on - * EQe, if False completion will be - * on EQe if p_hwfn opaque - * different from the RXQ opaque - * otherwise on CQe. - * @param cqe_completion If True completion will be - * receive on CQe. - * @return int + * @p_hwfn: HW device data. + * @p_rxq: Handler of queue to close + * @eq_completion_only: If True completion will be on + * EQe, if False completion will be + * on EQe if p_hwfn opaque + * different from the RXQ opaque + * otherwise on CQe. + * @cqe_completion: If True completion will be receive on CQe. + * + * Return: Int. */ int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, @@ -111,12 +111,12 @@ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, bool eq_completion_only, bool cqe_completion); /** - * @brief qed_eth_tx_queue_stop - closes a Tx queue + * qed_eth_tx_queue_stop(): Closes a Tx queue. * - * @param p_hwfn - * @param p_txq - handle to Tx queue needed to be closed + * @p_hwfn: HW device data. + * @p_txq: handle to Tx queue needed to be closed. * - * @return int + * Return: Int. 
*/ int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq); @@ -146,7 +146,6 @@ struct qed_sp_vport_start_params { int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); - struct qed_filter_accept_flags { u8 update_rx_mode_config; u8 update_tx_mode_config; @@ -205,16 +204,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_vport_stop - - * - * This ramrod closes a VPort after all its RX and TX queues are terminated. - * An Assert is generated if any queues are left open. + * qed_sp_vport_stop: This ramrod closes a VPort after all its + * RX and TX queues are terminated. + * An Assert is generated if any queues are left open. * - * @param p_hwfn - * @param opaque_fid - * @param vport_id VPort ID + * @p_hwfn: HW device data. + * @opaque_fid: Opaque FID + * @vport_id: VPort ID. * - * @return int + * Return: Int. */ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id); @@ -225,22 +223,21 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_rx_eth_queues_update - + * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue. + * It is used for setting the active state + * of the queue and updating the TPA and + * SGE parameters. + * @p_hwfn: HW device data. + * @pp_rxq_handlers: An array of queue handlers to be updated. + * @num_rxqs: number of queues to update. + * @complete_cqe_flg: Post completion to the CQE Ring if set. + * @complete_event_flg: Post completion to the Event Ring if set. + * @comp_mode: Comp mode. + * @p_comp_data: Pointer Comp data. * - * This ramrod updates an RX queue. It is used for setting the active state - * of the queue and updating the TPA and SGE parameters. + * Return: Int. * - * @note At the moment - only used by non-linux VFs. - * - * @param p_hwfn - * @param pp_rxq_handlers An array of queue handlers to be updated. - * @param num_rxqs number of queues to update. - * @param complete_cqe_flg Post completion to the CQE Ring if set - * @param complete_event_flg Post completion to the Event Ring if set - * @param comp_mode - * @param p_comp_data - * - * @return int + * Note At the moment - only used by non-linux VFs. */ int @@ -257,30 +254,32 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); void qed_reset_vport_stats(struct qed_dev *cdev); /** - * *@brief qed_arfs_mode_configure - - * - **Enable or disable rfs mode. It must accept atleast one of tcp or udp true - **and atleast one of ipv4 or ipv6 true to enable rfs mode. + * qed_arfs_mode_configure(): Enable or disable rfs mode. + * It must accept at least one of tcp or udp true + * and at least one of ipv4 or ipv6 true to enable + * rfs mode. * - **@param p_hwfn - **@param p_ptt - **@param p_cfg_params - arfs mode configuration parameters. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_cfg_params: arfs mode configuration parameters. * + * Return. Void. */ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_arfs_config_params *p_cfg_params); /** - * @brief - qed_configure_rfs_ntuple_filter + * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add + * or remove arfs hw filter * - * This ramrod should be used to add or remove arfs hw filter + * @p_hwfn: HW device data. 
+ * @p_cb: Used for QED_SPQ_MODE_CB,where client would initialize + * it with cookie and callback function address, if not + * using this mode then client must pass NULL. + * @p_params: Pointer to params. * - * @params p_hwfn - * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize - * it with cookie and callback function address, if not - * using this mode then client must pass NULL. - * @params p_params + * Return: Void. */ int qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, @@ -374,16 +373,17 @@ qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); /** - * @brief - Starts an Rx queue, when queue_cid is already prepared + * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is + * already prepared * - * @param p_hwfn - * @param p_cid - * @param bd_max_bytes - * @param bd_chain_phys_addr - * @param cqe_pbl_addr - * @param cqe_pbl_size + * @p_hwfn: HW device data. + * @p_cid: Pointer CID. + * @bd_max_bytes: Max bytes. + * @bd_chain_phys_addr: Chain physcial address. + * @cqe_pbl_addr: PBL address. + * @cqe_pbl_size: PBL size. * - * @return int + * Return: Int. */ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, @@ -393,15 +393,16 @@ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); /** - * @brief - Starts a Tx queue, where queue_cid is already prepared + * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is + * already prepared * - * @param p_hwfn - * @param p_cid - * @param pbl_addr - * @param pbl_size - * @param p_pq_params - parameters for choosing the PQ for this Tx queue + * @p_hwfn: HW device data. + * @p_cid: Pointer CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL size. + * @pq_id: Parameters for choosing the PQ for this Tx queue. * - * @return int + * Return: Int. */ int qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c46a7f756ed5..3fedcefc36d8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -28,6 +28,7 @@ #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_ll2.h" @@ -43,6 +44,8 @@ #define QED_LL2_TX_SIZE (256) #define QED_LL2_RX_SIZE (4096) +#define QED_LL2_INVALID_STATS_ID 0xff + struct qed_cb_ll2_info { int rx_cnt; u32 rx_size; @@ -62,6 +65,29 @@ struct qed_ll2_buffer { dma_addr_t phys_addr; }; +static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn, + u8 ll2_queue_type, u8 qid) +{ + u8 stats_id; + + /* For legacy (RAM based) queues, the stats_id will be set as the + * queue_id. Otherwise (context based queue), it will be set to + * the "abs_pf_id" offset from the end of the RAM based queue IDs. + * If the final value exceeds the total counters amount, return + * INVALID value to indicate that the stats for this connection should + * be disabled. 
+ */ + if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) + stats_id = qid; + else + stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id; + + if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS) + return stats_id; + else + return QED_LL2_INVALID_STATS_ID; +} + static void qed_ll2b_complete_tx_packet(void *cxt, u8 connection_handle, void *cookie, @@ -106,7 +132,7 @@ static int qed_ll2_alloc_buffer(struct qed_dev *cdev, } static int qed_ll2_dealloc_buffer(struct qed_dev *cdev, - struct qed_ll2_buffer *buffer) + struct qed_ll2_buffer *buffer) { spin_lock_bh(&cdev->ll2->lock); @@ -1124,6 +1150,7 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; + qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg); /* Get SPQ entry */ @@ -1533,7 +1560,7 @@ static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn, int qed_ll2_establish_connection(void *cxt, u8 connection_handle) { - struct e4_core_conn_context *p_cxt; + struct core_conn_context *p_cxt; struct qed_ll2_tx_packet *p_pkt; struct qed_ll2_info *p_ll2_conn; struct qed_hwfn *p_hwfn = cxt; @@ -1544,7 +1571,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) int rc = -EINVAL; u32 i, capacity; size_t desc_size; - u8 qid; + u8 qid, stats_id; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) @@ -1610,16 +1637,32 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle, p_ll2_conn->input.rx_conn_type); + stats_id = qed_ll2_handle_to_stats_id(p_hwfn, + p_ll2_conn->input.rx_conn_type, + qid); p_ll2_conn->queue_id = qid; - p_ll2_conn->tx_stats_id = qid; + p_ll2_conn->tx_stats_id = stats_id; - DP_VERBOSE(p_hwfn, QED_MSG_LL2, - "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n", - p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid); + /* If there is no valid stats id for this connection, disable stats */ + if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) { + p_ll2_conn->tx_stats_en = 0; + DP_VERBOSE(p_hwfn, + QED_MSG_LL2, + "Disabling stats for queue %d - not enough counters\n", + qid); + } + + DP_VERBOSE(p_hwfn, + QED_MSG_LL2, + "Establishing ll2 queue. 
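The new qed_ll2_handle_to_stats_id() keeps the old behaviour for RAM-based (legacy) LL2 queues, where the stats counter is simply the queue id, and maps context-based queues past the legacy range using the PF's absolute id; any result beyond the number of TX stats counters disables statistics for that connection, which is what the "Disabling stats for queue" path in qed_ll2_establish_connection() handles. A small user-space model of the mapping follows; the two MAX_* limits are sample values, the real ones come from the firmware HSI.

#include <stdio.h>
#include <stdbool.h>

/* Sample limits for illustration only. */
#define MAX_NUM_LL2_RX_RAM_QUEUES	32
#define MAX_NUM_LL2_TX_STATS_COUNTERS	48
#define LL2_INVALID_STATS_ID		0xff

static unsigned int ll2_stats_id(bool legacy_queue, unsigned int qid,
				 unsigned int abs_pf_id)
{
	unsigned int stats_id;

	/* Legacy (RAM based) queues keep qid; context based queues are
	 * placed after the whole legacy range, one slot per PF.
	 */
	if (legacy_queue)
		stats_id = qid;
	else
		stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + abs_pf_id;

	return stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS ?
	       stats_id : LL2_INVALID_STATS_ID;
}

int main(void)
{
	printf("legacy qid 5     -> %u\n", ll2_stats_id(true, 5, 0));
	printf("ctx based, PF 3  -> %u\n", ll2_stats_id(false, 0, 3));
	printf("ctx based, PF 40 -> %u\n", ll2_stats_id(false, 0, 40)); /* no counter left */
	return 0;
}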
PF %d ctx_based=%d abs qid=%d stats_id=%d\n", + p_hwfn->rel_pf_id, + p_ll2_conn->input.rx_conn_type, qid, stats_id); if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { - p_rx->set_prod_addr = p_hwfn->regview + - GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid); + p_rx->set_prod_addr = + (u8 __iomem *)p_hwfn->regview + + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_LL2_RX_PRODS, qid); } else { /* QED_LL2_RX_TYPE_CTX - using doorbell */ p_rx->ctx_based = 1; @@ -1762,7 +1805,7 @@ int qed_ll2_post_rx_buffer(void *cxt, } } - /* If we're lacking entires, let's try to flush buffers to FW */ + /* If we're lacking entries, let's try to flush buffers to FW */ if (!p_curp || !p_curb) { rc = -EBUSY; p_curp = NULL; @@ -2609,7 +2652,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) DP_NOTICE(cdev, "Failed to add an LLH filter\n"); goto err3; } - } ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index df88d00053a2..0bfc375161ed 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -32,7 +32,6 @@ #define QED_LL2_LEGACY_CONN_BASE_PF 0 #define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF - struct qed_ll2_rx_packet { struct list_head list_entry; struct core_rx_bd_with_buff_len *rxq_bd; @@ -119,41 +118,41 @@ struct qed_ll2_info { extern const struct qed_ll2_ops qed_ll2_ops_pass; /** - * @brief qed_ll2_acquire_connection - allocate resources, - * starts rx & tx (if relevant) queues pair. Provides - * connecion handler as output parameter. + * qed_ll2_acquire_connection(): Allocate resources, + * starts rx & tx (if relevant) queues pair. + * Provides connecion handler as output + * parameter. * + * @cxt: Pointer to the hw-function [opaque to some]. + * @data: Describes connection parameters. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param data - describes connection parameters - * @return int + * Return: Int. */ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data); /** - * @brief qed_ll2_establish_connection - start previously - * allocated LL2 queues pair + * qed_ll2_establish_connection(): start previously allocated LL2 queues pair * - * @param cxt - pointer to the hw-function [opaque to some] - * @param p_ptt - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_establish_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue. + * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param addr rx (physical address) buffers to submit - * @param cookie - * @param notify_fw produce corresponding Rx BD immediately + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: RX (physical address) buffers to submit. + * @buf_len: Buffer Len. + * @cookie: Cookie. 
+ * @notify_fw: Produce corresponding Rx BD immediately. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_post_rx_buffer(void *cxt, u8 connection_handle, @@ -161,15 +160,15 @@ int qed_ll2_post_rx_buffer(void *cxt, u16 buf_len, void *cookie, u8 notify_fw); /** - * @brief qed_ll2_prepare_tx_packet - request for start Tx BD - * to prepare Tx packet submission to FW. + * qed_ll2_prepare_tx_packet(): Request for start Tx BD + * to prepare Tx packet submission to FW. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle - * @param pkt - info regarding the tx packet - * @param notify_fw - issue doorbell to fw for this packet + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: Connection handle. + * @pkt: Info regarding the tx packet. + * @notify_fw: Issue doorbell to fw for this packet. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_prepare_tx_packet(void *cxt, u8 connection_handle, @@ -177,81 +176,83 @@ int qed_ll2_prepare_tx_packet(void *cxt, bool notify_fw); /** - * @brief qed_ll2_release_connection - releases resources - * allocated for LL2 connection + * qed_ll2_release_connection(): Releases resources allocated for LL2 + * connection. + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection + * Return: Void. */ void qed_ll2_release_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill - * Tx BD of BDs requested by - * qed_ll2_prepare_tx_packet + * qed_ll2_set_fragment_of_tx_packet(): Provides fragments to fill + * Tx BD of BDs requested by + * qed_ll2_prepare_tx_packet * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle - * obtained from - * qed_ll2_require_connection - * @param addr - * @param nbytes + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: Address. + * @nbytes: Number of bytes. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_set_fragment_of_tx_packet(void *cxt, u8 connection_handle, dma_addr_t addr, u16 nbytes); /** - * @brief qed_ll2_terminate_connection - stops Tx/Rx queues - * + * qed_ll2_terminate_connection(): Stops Tx/Rx queues * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle - * obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_get_stats - get LL2 queue's statistics + * qed_ll2_get_stats(): Get LL2 queue's statistics * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @p_stats: Pointer Status. 
* - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param p_stats - * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats); /** - * @brief qed_ll2_alloc - Allocates LL2 connections set + * qed_ll2_alloc(): Allocates LL2 connections set. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_ll2_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_ll2_setup - Inits LL2 connections set + * qed_ll2_setup(): Inits LL2 connections set. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. * */ void qed_ll2_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_ll2_free - Releases LL2 connections set + * qed_ll2_free(): Releases LL2 connections set + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. * */ void qed_ll2_free(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d10e1cd6d2ba..7673b3e07736 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -99,10 +99,6 @@ static const u32 qed_mfw_ext_10g[] __initconst = { ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, }; -static const u32 qed_mfw_ext_20g[] __initconst = { - ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, -}; - static const u32 qed_mfw_ext_25g[] __initconst = { ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, @@ -148,7 +144,6 @@ static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), - QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, @@ -262,7 +257,7 @@ module_exit(qed_exit); /* Check if the DMA controller on the machine can properly handle the DMA * addressing required by the device. 
-*/ + */ static int qed_set_coherency_mask(struct qed_dev *cdev) { struct device *dev = &cdev->pdev->dev; @@ -547,7 +542,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, goto err2; } - DP_INFO(cdev, "qed_probe completed successfully\n"); + DP_INFO(cdev, "%s completed successfully\n", __func__); return cdev; @@ -980,7 +975,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, rc = qed_set_int_mode(cdev, false); if (rc) { - DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); + DP_ERR(cdev, "%s ERR\n", __func__); return rc; } @@ -1161,6 +1156,7 @@ static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(wq_flag, &hwfn->slowpath_task_flags); + /* Memory barrier after setting atomic bit */ smp_mb__after_atomic(); queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); @@ -1382,7 +1378,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, (params->drv_minor << 16) | (params->drv_rev << 8) | (params->drv_eng); - strlcpy(drv_version.name, params->name, + strscpy(drv_version.name, params->name, MCP_DRV_VER_STR_SIZE - 4); rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, &drv_version); @@ -2892,7 +2888,7 @@ static int qed_update_drv_state(struct qed_dev *cdev, bool active) return status; } -static int qed_update_mac(struct qed_dev *cdev, u8 *mac) +static int qed_update_mac(struct qed_dev *cdev, const u8 *mac) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; @@ -3079,8 +3075,10 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn) DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, "Scheduling slowpath task [Flag: %d]\n", QED_SLOWPATH_MFW_TLV_REQ); + /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); + /* Memory barrier after setting atomic bit */ smp_mb__after_atomic(); queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); @@ -3159,3 +3157,8 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, return 0; } + +unsigned long qed_get_epoch_time(void) +{ + return ktime_get_real_seconds(); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 24cd41567775..64678a256f3b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -17,6 +17,7 @@ #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" +#include "qed_mfw_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" @@ -30,11 +31,11 @@ #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ - qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ + qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \ _val) #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ - qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) + qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset))) #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ @@ -384,7 +385,7 @@ qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); /* Get the union data */ - if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) { + if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) { u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + offsetof(struct public_drv_mb, union_data); @@ -410,7 +411,7 @@ 
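The qed_mcp.c hunk above wraps the _offset argument of DRV_INNER_WR/DRV_INNER_RD in parentheses. In these particular macros the argument is only added to a base, so the change is mostly defensive, but it guards against the classic macro-expansion pitfall when a caller passes an expression. A minimal stand-alone sketch of that bug class (illustrative names and values only, not qed code):

#include <stdio.h>

/* Illustrative macros mimicking the bug class the hunk defends against. */
#define REG_ADDR_BAD(base, off)   ((base) + off * 4)    /* 'off' not parenthesized */
#define REG_ADDR_GOOD(base, off)  ((base) + (off) * 4)  /* 'off' parenthesized     */

int main(void)
{
	unsigned int base = 0x1000;

	/* Caller passes an expression as the offset argument. */
	printf("bad:  0x%x\n", REG_ADDR_BAD(base, 1 + 2));  /* base + 1 + 2*4 = 0x1009 */
	printf("good: 0x%x\n", REG_ADDR_GOOD(base, 1 + 2)); /* base + (1+2)*4 = 0x100c */
	return 0;
}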
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, union_data_addr = p_hwfn->mcp_info->drv_mb_addr + offsetof(struct public_drv_mb, union_data); memset(&union_data, 0, sizeof(union_data)); - if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size) + if (p_mb_params->p_data_src && p_mb_params->data_src_size) memcpy(&union_data, p_mb_params->p_data_src, p_mb_params->data_src_size); qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, @@ -671,7 +672,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, u32 cmd, u32 param, u32 *o_mcp_resp, - u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf) + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf, bool b_can_sleep) { struct qed_mcp_mb_params mb_params; u8 raw_data[MCP_DRV_NVM_BUF_LEN]; @@ -684,6 +686,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, /* Use the maximal value since the actual one is part of the response */ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; + if (b_can_sleep) + mb_params.flags = QED_MB_FLAG_CAN_SLEEP; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) @@ -916,7 +920,6 @@ enum qed_load_req_force { }; static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn, - enum qed_load_req_force force_cmd, u8 *p_mfw_force_cmd) { @@ -1526,15 +1529,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { ext_speed = 0; if (params->ext_speed.autoneg) - ext_speed |= ETH_EXT_SPEED_AN; + ext_speed |= ETH_EXT_SPEED_NONE; val = params->ext_speed.forced_speed; if (val & QED_EXT_SPEED_1G) ext_speed |= ETH_EXT_SPEED_1G; if (val & QED_EXT_SPEED_10G) ext_speed |= ETH_EXT_SPEED_10G; - if (val & QED_EXT_SPEED_20G) - ext_speed |= ETH_EXT_SPEED_20G; if (val & QED_EXT_SPEED_25G) ext_speed |= ETH_EXT_SPEED_25G; if (val & QED_EXT_SPEED_40G) @@ -1560,8 +1561,6 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) ext_speed |= ETH_EXT_ADV_SPEED_1G; if (val & QED_EXT_SPEED_MASK_10G) ext_speed |= ETH_EXT_ADV_SPEED_10G; - if (val & QED_EXT_SPEED_MASK_20G) - ext_speed |= ETH_EXT_ADV_SPEED_20G; if (val & QED_EXT_SPEED_MASK_25G) ext_speed |= ETH_EXT_ADV_SPEED_25G; if (val & QED_EXT_SPEED_MASK_40G) @@ -2081,7 +2080,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id) { - u32 global_offsize; + u32 global_offsize, public_base; if (IS_VF(p_hwfn->cdev)) { if (p_hwfn->vf_iov_info) { @@ -2098,16 +2097,16 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, } } + public_base = p_hwfn->mcp_info->public_base; global_offsize = qed_rd(p_hwfn, p_ptt, - SECTION_OFFSIZE_ADDR(p_hwfn-> - mcp_info->public_base, + SECTION_OFFSIZE_ADDR(public_base, PUBLIC_GLOBAL)); *p_mfw_ver = qed_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + offsetof(struct public_global, mfw_ver)); - if (p_running_bundle_id != NULL) { + if (p_running_bundle_id) { *p_running_bundle_id = qed_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + offsetof(struct public_global, @@ -2209,6 +2208,7 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, return 0; } + static bool qed_is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) { @@ -2378,7 +2378,7 @@ qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "According to Legacy capabilities, L2 personality is %08x\n", - (u32) *p_proto); + (u32)*p_proto); } static int @@ -2423,7 +2423,7 @@ qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "According to 
capabilities, L2 personality is %08x [resp %08x param %08x]\n", - (u32) *p_proto, resp, param); + (u32)*p_proto, resp, param); return 0; } @@ -2445,9 +2445,6 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, case FUNC_MF_CFG_PROTOCOL_ISCSI: *p_proto = QED_PCI_ISCSI; break; - case FUNC_MF_CFG_PROTOCOL_NVMETCP: - *p_proto = QED_PCI_NVMETCP; - break; case FUNC_MF_CFG_PROTOCOL_FCOE: *p_proto = QED_PCI_FCOE; break; @@ -2854,7 +2851,7 @@ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, } int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *mac) + struct qed_ptt *p_ptt, const u8 *mac) { struct qed_mcp_mb_params mb_params; u32 mfw_mac[2]; @@ -3026,7 +3023,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) DRV_MB_PARAM_NVM_LEN_OFFSET), &resp, &resp_param, &read_len, - (u32 *)(p_buf + offset)); + (u32 *)(p_buf + offset), false); if (rc || (resp != FW_MSG_CODE_NVM_OK)) { DP_NOTICE(cdev, "MCP command rc = %d\n", rc); @@ -3034,7 +3031,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) } /* This can be a lengthy process, and it's possible scheduler - * isn't preemptable. Sleep a bit to prevent CPU hogging. + * isn't preemptible. Sleep a bit to prevent CPU hogging. */ if (bytes_left % 0x1000 < (bytes_left - read_len) % 0x1000) @@ -3129,10 +3126,12 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, * to be delivered to MFW. */ if (param && cmd == QED_PUT_FILE_DATA) { - buf_idx = QED_MFW_GET_FIELD(param, - FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); - buf_size = QED_MFW_GET_FIELD(param, - FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); + buf_idx = + QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); + buf_size = + QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); } else { buf_idx += buf_size; buf_size = min_t(u32, (len - buf_idx), @@ -3176,7 +3175,7 @@ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_TRANSCEIVER_READ, nvm_offset, &resp, ¶m, &buf_size, - (u32 *)(p_buf + offset)); + (u32 *)(p_buf + offset), true); if (rc) { DP_NOTICE(p_hwfn, "Failed to send a transceiver read command to the MFW. 
rc = %d.\n", @@ -3275,7 +3274,7 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, DRV_MSG_CODE_BIST_TEST, param, &resp, &resp_param, &buf_size, - (u32 *)p_image_att); + (u32 *)p_image_att, false); if (rc) return rc; @@ -3388,7 +3387,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, type = NVM_TYPE_DEFAULT_CFG; break; case QED_NVM_IMAGE_NVM_META: - type = NVM_TYPE_META; + type = NVM_TYPE_NVM_META; break; default: DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", @@ -3905,10 +3904,6 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; - if (QED_IS_E5(p_hwfn->cdev)) - features |= - DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL; - return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); } @@ -4002,7 +3997,8 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_NVM_CFG_OPTION, - mb_param, &resp, ¶m, p_len, (u32 *)p_buf); + mb_param, &resp, ¶m, p_len, + (u32 *)p_buf, false); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 8edb450d0abf..564723800d15 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -266,97 +266,97 @@ union qed_mfw_tlv_data { #define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4) /** - * @brief - returns the link params of the hw function + * qed_mcp_get_link_params(): Returns the link params of the hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link params + * Returns: Pointer to link params. */ -struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *); +struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn); /** - * @brief - return the link state of the hw function + * qed_mcp_get_link_state(): Return the link state of the hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link state + * Returns: Pointer to link state. */ -struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *); +struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn); /** - * @brief - return the link capabilities of the hw function + * qed_mcp_get_link_capabilities(): Return the link capabilities of the + * hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link capabilities + * Returns: Pointer to link capabilities. */ struct qed_mcp_link_capabilities *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn); /** - * @brief Request the MFW to set the the link according to 'link_input'. + * qed_mcp_set_link(): Request the MFW to set the link according + * to 'link_input'. * - * @param p_hwfn - * @param p_ptt - * @param b_up - raise link if `true'. Reset link if `false'. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_up: Raise link if `true'. Reset link if `false'. * - * @return int + * Return: Int. */ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up); /** - * @brief Get the management firmware version value + * qed_mcp_get_mfw_ver(): Get the management firmware version value. * - * @param p_hwfn - * @param p_ptt - * @param p_mfw_ver - mfw version value - * @param p_running_bundle_id - image id in nvram; Optional. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mfw_ver: MFW version value. 
+ * @p_running_bundle_id: Image id in nvram; Optional. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - operation was successful. */ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id); /** - * @brief Get the MBI version value + * qed_mcp_get_mbi_ver(): Get the MBI version value. * - * @param p_hwfn - * @param p_ptt - * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mbi_ver: A pointer to a variable to be filled with the MBI version. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - operation was successful. */ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mbi_ver); /** - * @brief Get media type value of the port. + * qed_mcp_get_media_type(): Get media type value of the port. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param mfw_ver - media type value + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @media_type: Media type value * - * @return int - - * 0 - Operation was successul. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *media_type); /** - * @brief Get transceiver data of the port. + * qed_mcp_get_transceiver_data(): Get transceiver data of the port. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_transceiver_state - transceiver state. - * @param p_transceiver_type - media type value + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_transceiver_state: Transceiver state. + * @p_tranceiver_type: Media type value. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -364,50 +364,48 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, u32 *p_tranceiver_type); /** - * @brief Get transceiver supported speed mask. + * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_speed_mask - Bit mask of all supported speeds. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_speed_mask: Bit mask of all supported speeds. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_speed_mask); /** - * @brief Get board configuration. + * qed_mcp_get_board_config(): Get board configuration. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_board_config - Board config. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_board_config: Board config. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_board_config); /** - * @brief General function for sending commands to the MCP - * mailbox. It acquire mutex lock for the entire - * operation, from sending the request until the MCP - * response. Waiting for MCP response will be checked up - * to 5 seconds every 5ms. 
+ * qed_mcp_cmd(): General function for sending commands to the MCP + * mailbox. It acquire mutex lock for the entire + * operation, from sending the request until the MCP + * response. Waiting for MCP response will be checked up + * to 5 seconds every 5ms. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param cmd - command to be sent to the MCP. - * @param param - Optional param - * @param o_mcp_resp - The MCP response code (exclude sequence). - * @param o_mcp_param- Optional parameter provided by the MCP + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @cmd: command to be sent to the MCP. + * @param: Optional param + * @o_mcp_resp: The MCP response code (exclude sequence). + * @o_mcp_param: Optional parameter provided by the MCP * response - * @return int - 0 - operation - * was successul. + * + * Return: Int - 0 - Operation was successul. */ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -417,37 +415,39 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param); /** - * @brief - drains the nig, allowing completion to pass in case of pauses. - * (Should be called only from sleepable context) + * qed_mcp_drain(): drains the nig, allowing completion to pass in + * case of pauses. + * (Should be called only from sleepable context) * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: Int. */ int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get the flash size value + * qed_mcp_get_flash_size(): Get the flash size value. * - * @param p_hwfn - * @param p_ptt - * @param p_flash_size - flash size in bytes to be filled. + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_flash_size: Flash size in bytes to be filled. * - * @return int - 0 - operation was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_flash_size); /** - * @brief Send driver version to MFW + * qed_mcp_send_drv_version(): Send driver version to MFW. * - * @param p_hwfn - * @param p_ptt - * @param version - Version value - * @param name - Protocol driver name + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_ver: Version value. * - * @return int - 0 - operation was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, @@ -455,146 +455,148 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_mcp_drv_version *p_ver); /** - * @brief Read the MFW process kill counter + * qed_get_process_kill_counter(): Read the MFW process kill counter. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return u32 + * Return: u32. */ u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Trigger a recovery process + * qed_start_recovery_process(): Trigger a recovery process. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int + * Return: Int. */ int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief A recovery handler must call this function as its first step. - * It is assumed that the handler is not run from an interrupt context. 
+ * qed_recovery_prolog(): A recovery handler must call this function + * as its first step. + * It is assumed that the handler is not run from + * an interrupt context. * - * @param cdev - * @param p_ptt + * @cdev: Qed dev pointer. * - * @return int + * Return: int. */ int qed_recovery_prolog(struct qed_dev *cdev); /** - * @brief Notify MFW about the change in base device properties + * qed_mcp_ov_update_current_config(): Notify MFW about the change in base + * device properties * - * @param p_hwfn - * @param p_ptt - * @param client - qed client type + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @client: Qed client type. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_client client); /** - * @brief Notify MFW about the driver state + * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state. * - * @param p_hwfn - * @param p_ptt - * @param drv_state - Driver state + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @drv_state: Driver state. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_driver_state drv_state); /** - * @brief Send MTU size to MFW + * qed_mcp_ov_update_mtu(): Send MTU size to MFW. * - * @param p_hwfn - * @param p_ptt - * @param mtu - MTU size + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mtu: MTU size. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 mtu); /** - * @brief Send MAC address to MFW + * qed_mcp_ov_update_mac(): Send MAC address to MFW. * - * @param p_hwfn - * @param p_ptt - * @param mac - MAC address + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mac: MAC address. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *mac); + struct qed_ptt *p_ptt, const u8 *mac); /** - * @brief Send WOL mode to MFW + * qed_mcp_ov_update_wol(): Send WOL mode to MFW. * - * @param p_hwfn - * @param p_ptt - * @param wol - WOL mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @wol: WOL mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_wol wol); /** - * @brief Set LED status + * qed_mcp_set_led(): Set LED status. * - * @param p_hwfn - * @param p_ptt - * @param mode - LED mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mode: LED mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_led_mode mode); /** - * @brief Read from nvm + * qed_mcp_nvm_read(): Read from NVM. * - * @param cdev - * @param addr - nvm offset - * @param p_buf - nvm read buffer - * @param len - buffer len + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @p_buf: NVM read buffer. + * @len: Buffer len. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); /** - * @brief Write to nvm + * qed_mcp_nvm_write(): Write to NVM. 
* - * @param cdev - * @param addr - nvm offset - * @param cmd - nvm command - * @param p_buf - nvm write buffer - * @param len - buffer len + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @cmd: NVM command. + * @p_buf: NVM write buffer. + * @len: Buffer len. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len); /** - * @brief Check latest response + * qed_mcp_nvm_resp(): Check latest response. * - * @param cdev - * @param p_buf - nvm write buffer + * @cdev: Qed dev pointer. + * @p_buf: NVM write buffer. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf); @@ -604,13 +606,13 @@ struct qed_nvm_image_att { }; /** - * @brief Allows reading a whole nvram image + * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image. * - * @param p_hwfn - * @param image_id - image to get attributes for - * @param p_image_att - image attributes structure into which to fill data + * @p_hwfn: HW device data. + * @image_id: Image to get attributes for. + * @p_image_att: Image attributes structure into which to fill data. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, @@ -618,64 +620,65 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, struct qed_nvm_image_att *p_image_att); /** - * @brief Allows reading a whole nvram image + * qed_mcp_get_nvm_image(): Allows reading a whole nvram image. * - * @param p_hwfn - * @param image_id - image requested for reading - * @param p_buffer - allocated buffer into which to fill data - * @param buffer_len - length of the allocated buffer. + * @p_hwfn: HW device data. + * @image_id: image requested for reading. + * @p_buffer: allocated buffer into which to fill data. + * @buffer_len: length of the allocated buffer. * - * @return 0 iff p_buffer now contains the nvram image. + * Return: 0 if p_buffer now contains the nvram image. */ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len); /** - * @brief Bist register test + * qed_mcp_bist_register_test(): Bist register test. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Bist clock test + * qed_mcp_bist_clock_test(): Bist clock test. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Bist nvm test - get number of images + * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param num_images - number of images if operation was + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @num_images: number of images if operation was * successful. 0 if not. 
* - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *num_images); /** - * @brief Bist nvm test - get image attributes by index + * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes + * by index. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param p_image_att - Attributes of image - * @param image_index - Index of image to get information for + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_image_att: Attributes of image. + * @image_index: Index of image to get information for. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -683,23 +686,26 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, u32 image_index); /** - * @brief - Processes the TLV request from MFW i.e., get the required TLV info - * from the qed client and send it to the MFW. + * qed_mfw_process_tlv_req(): Processes the TLV request from MFW i.e., + * get the required TLV info + * from the qed client and send it to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Send raw debug data to the MFW + * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_buf: raw debug data buffer. + * @size: Buffer size. * - * @param p_hwfn - * @param p_ptt - * @param p_buf - raw debug data buffer - * @param size - buffer size + * Return : Int. */ int qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, @@ -796,47 +802,49 @@ qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn) } /** - * @brief Initialize the interface with the MCP + * qed_mcp_cmd_init(): Initialize the interface with the MCP. * - * @param p_hwfn - HW func - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int + * Return: Int. */ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Initialize the port interface with the MCP + * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. * - * @param p_hwfn - * @param p_ptt * Can only be called after `num_ports_in_engines' is set */ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Releases resources allocated during the init process. + * qed_mcp_free(): Releases resources allocated during the init process. * - * @param p_hwfn - HW func - * @param p_ptt - PTT required for register access + * @p_hwfn: HW function. * - * @return int + * Return: Int. */ int qed_mcp_free(struct qed_hwfn *p_hwfn); /** - * @brief This function is called from the DPC context. After - * pointing PTT to the mfw mb, check for events sent by the MCP - * to the driver and ack them. In case a critical event - * detected, it will be handled here, otherwise the work will be - * queued to a sleepable work-queue. + * qed_mcp_handle_events(): This function is called from the DPC context. 
+ * After pointing PTT to the mfw mb, check for events sent by + * the MCP to the driver and ack them. In case a critical event + * detected, it will be handled here, otherwise the work will be + * queued to a sleepable work-queue. + * + * @p_hwfn: HW function. + * @p_ptt: PTT required for register access. * - * @param p_hwfn - HW function - * @param p_ptt - PTT required for register access - * @return int - 0 - operation - * was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -858,169 +866,177 @@ struct qed_load_req_params { }; /** - * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds, - * returns whether this PF is the first on the engine/port or function. + * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the + * operation succeeds, returns whether this PF is + * the first on the engine/port or function. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_load_req_params *p_params); /** - * @brief Sends a LOAD_DONE message to the MFW + * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Sends a UNLOAD_REQ message to the MFW + * qed_mcp_unload_req(): Sends a UNLOAD_REQ message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Sends a UNLOAD_DONE message to the MFW + * qed_mcp_unload_done(): Sends a UNLOAD_DONE message to the MFW * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Read the MFW mailbox into Current buffer. + * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Ack to mfw that driver finished FLR process for VFs + * qed_mcp_ack_vf_flr(): Ack to mfw that driver finished FLR process for VFs * - * @param p_hwfn - * @param p_ptt - * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @vfs_to_ack: bit mask of all engine VFs for which the PF acks. * - * @param return int - 0 upon success. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *vfs_to_ack); /** - * @brief - calls during init to read shmem of all function-related info. + * qed_mcp_fill_shmem_func_info(): Calls during init to read shmem of + * all function-related info. * - * @param p_hwfn + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. 
+ * Return: 0 upon success. */ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Reset the MCP using mailbox command. + * qed_mcp_reset(): Reset the MCP using mailbox command. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Sends an NVM read command request to the MFW to get - * a buffer. - * - * @param p_hwfn - * @param p_ptt - * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or - * DRV_MSG_CODE_NVM_READ_NVRAM commands - * @param param - [0:23] - Offset [24:31] - Size - * @param o_mcp_resp - MCP response - * @param o_mcp_param - MCP response param - * @param o_txn_size - Buffer size output - * @param o_buf - Pointer to the buffer returned by the MFW. + * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get + * a buffer. * - * @param return 0 upon success. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or + * DRV_MSG_CODE_NVM_READ_NVRAM commands. + * @param: [0:23] - Offset [24:31] - Size. + * @o_mcp_resp: MCP response. + * @o_mcp_param: MCP response param. + * @o_txn_size: Buffer size output. + * @o_buf: Pointer to the buffer returned by the MFW. + * @b_can_sleep: Can sleep. + * + * Return: 0 upon success. */ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, - u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf); + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf, bool b_can_sleep); /** - * @brief Read from sfp + * qed_mcp_phy_sfp_read(): Read from sfp. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param port - transceiver port - * @param addr - I2C address - * @param offset - offset in sfp - * @param len - buffer length - * @param p_buf - buffer to read into + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @port: transceiver port. + * @addr: I2C address. + * @offset: offset in sfp. + * @len: buffer length. + * @p_buf: buffer to read into. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf); /** - * @brief indicates whether the MFW objects [under mcp_info] are accessible + * qed_mcp_is_init(): indicates whether the MFW objects [under mcp_info] + * are accessible * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return true iff MFW is running and mcp_info is initialized + * Return: true if MFW is running and mcp_info is initialized. */ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); /** - * @brief request MFW to configure MSI-X for a VF + * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF. * - * @param p_hwfn - * @param p_ptt - * @param vf_id - absolute inside engine - * @param num_sbs - number of entries to request + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @vf_id: absolute inside engine. + * @num: number of entries to request. * - * @return int + * Return: Int. */ int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num); /** - * @brief - Halt the MCP. + * qed_mcp_halt(): Halt the MCP. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. 
+ * Return: 0 upon success. */ int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Wake up the MCP. + * qed_mcp_resume: Wake up the MCP. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -1038,13 +1054,13 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 mask_parities); -/* @brief - Gets the mdump retained data from the MFW. +/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW. * - * @param p_hwfn - * @param p_ptt - * @param p_mdump_retain + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mdump_retain: mdump retain. * - * @param return 0 upon success. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, @@ -1052,15 +1068,15 @@ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, struct mdump_retain_data_stc *p_mdump_retain); /** - * @brief - Sets the MFW's max value for the given resource + * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource. * - * @param p_hwfn - * @param p_ptt - * @param res_id - * @param resc_max_val - * @param p_mcp_resp + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: RES ID. + * @resc_max_val: Resec max val. + * @p_mcp_resp: MCP Resp * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, @@ -1069,16 +1085,17 @@ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, u32 resc_max_val, u32 *p_mcp_resp); /** - * @brief - Gets the MFW allocation info for the given resource + * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given + * resource. * - * @param p_hwfn - * @param p_ptt - * @param res_id - * @param p_mcp_resp - * @param p_resc_num - * @param p_resc_start + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: Res ID. + * @p_mcp_resp: MCP resp. + * @p_resc_num: Resc num. + * @p_resc_start: Resc start. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, @@ -1087,13 +1104,13 @@ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start); /** - * @brief Send eswitch mode to MFW + * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW. * - * @param p_hwfn - * @param p_ptt - * @param eswitch - eswitch mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @eswitch: eswitch mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -1113,12 +1130,12 @@ enum qed_resc_lock { }; /** - * @brief - Initiates PF FLR + * qed_mcp_initiate_pf_flr(): Initiates PF FLR. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); struct qed_resc_lock_params { @@ -1151,13 +1168,13 @@ struct qed_resc_lock_params { }; /** - * @brief Acquires MFW generic resource lock + * qed_mcp_resc_lock(): Acquires MFW generic resource lock. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. 
+ * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, @@ -1175,13 +1192,13 @@ struct qed_resc_unlock_params { }; /** - * @brief Releases MFW generic resource lock + * qed_mcp_resc_unlock(): Releases MFW generic resource lock. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, @@ -1189,12 +1206,15 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, struct qed_resc_unlock_params *p_params); /** - * @brief - default initialization for lock/unlock resource structs + * qed_mcp_resc_lock_default_init(): Default initialization for + * lock/unlock resource structs. + * + * @p_lock: lock params struct to be initialized; Can be NULL. + * @p_unlock: unlock params struct to be initialized; Can be NULL. + * @resource: the requested resource. + * @b_is_permanent: disable retries & aging when set. * - * @param p_lock - lock params struct to be initialized; Can be NULL - * @param p_unlock - unlock params struct to be initialized; Can be NULL - * @param resource - the requested resource - * @paral b_is_permanent - disable retries & aging when set + * Return: Void. */ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, @@ -1202,94 +1222,117 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, resource, bool b_is_permanent); /** - * @brief - Return whether management firmware support smart AN + * qed_mcp_is_smart_an_supported(): Return whether management firmware + * support smart AN * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return bool - true if feature is supported. + * Return: bool true if feature is supported. */ bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn); /** - * @brief Learn of supported MFW features; To be done during early init + * qed_mcp_get_capabilities(): Learn of supported MFW features; + * To be done during early init. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Int. */ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Inform MFW of set of features supported by driver. Should be done - * inside the content of the LOAD_REQ. + * qed_mcp_set_capabilities(): Inform MFW of set of features supported + * by driver. Should be done inside the content + * of the LOAD_REQ. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Read ufp config from the shared memory. + * qed_mcp_read_ufp_config(): Read ufp config from the shared memory. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Populate the nvm info shadow in the given hardware function + * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given + * hardware function. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Int. 
*/ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); /** - * @brief Delete nvm info shadow in the given hardware function + * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given + * hardware function. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); /** - * @brief Get the engine affinity configuration. + * qed_mcp_get_engine_config(): Get the engine affinity configuration. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Int. */ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get the PPFID bitmap. + * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get NVM config attribute value. + * qed_mcp_nvm_get_cfg(): Get NVM config attribute value. * - * @param p_hwfn - * @param p_ptt - * @param option_id - * @param entity_id - * @param flags - * @param p_buf - * @param p_len + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @p_len: Len. + * + * Return: Int. */ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, u32 *p_len); /** - * @brief Set NVM config attribute value. + * qed_mcp_nvm_set_cfg(): Set NVM config attribute value. * - * @param p_hwfn - * @param p_ptt - * @param option_id - * @param entity_id - * @param flags - * @param p_buf - * @param len + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @len: Len. + * + * Return: Int. */ int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h new file mode 100644 index 000000000000..8a0e3c5d4bda --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h @@ -0,0 +1,2474 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. + */ + +#ifndef _QED_MFW_HSI_H +#define _QED_MFW_HSI_H + +#define MFW_TRACE_SIGNATURE 0x25071946 + +/* The trace in the buffer */ +#define MFW_TRACE_EVENTID_MASK 0x00ffff +#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000 +#define MFW_TRACE_PRM_SIZE_OFFSET 16 +#define MFW_TRACE_ENTRY_SIZE 3 + +struct mcp_trace { + u32 signature; /* Help to identify that the trace is valid */ + u32 size; /* the size of the trace buffer in bytes */ + u32 curr_level; /* 2 - all will be written to the buffer + * 1 - debug trace will not be written + * 0 - just errors will be written to the buffer + */ + u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means + * mask it. + */ + + /* Warning: the following pointers are assumed to be 32bits as they are + * used only in the MFW. + */ + u32 trace_prod; /* The next trace will be written to this offset */ + u32 trace_oldest; /* The oldest valid trace starts at this offset + * (usually very close after the current producer). 
+ */ +}; + +#define VF_MAX_STATIC 192 +#define VF_BITMAP_SIZE_IN_DWORDS (VF_MAX_STATIC / 32) +#define VF_BITMAP_SIZE_IN_BYTES (VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32)) + +#define EXT_VF_MAX_STATIC 240 +#define EXT_VF_BITMAP_SIZE_IN_DWORDS (((EXT_VF_MAX_STATIC - 1) / 32) + 1) +#define EXT_VF_BITMAP_SIZE_IN_BYTES (EXT_VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32)) +#define ADDED_VF_BITMAP_SIZE 2 + +#define MCP_GLOB_PATH_MAX 2 +#define MCP_PORT_MAX 2 +#define MCP_GLOB_PORT_MAX 4 +#define MCP_GLOB_FUNC_MAX 16 + +typedef u32 offsize_t; /* In DWORDS !!! */ +/* Offset from the beginning of the MCP scratchpad */ +#define OFFSIZE_OFFSET_SHIFT 0 +#define OFFSIZE_OFFSET_MASK 0x0000ffff +/* Size of specific element (not the whole array if any) */ +#define OFFSIZE_SIZE_SHIFT 16 +#define OFFSIZE_SIZE_MASK 0xffff0000 + +#define SECTION_OFFSET(_offsize) (((((_offsize) & \ + OFFSIZE_OFFSET_MASK) >> \ + OFFSIZE_OFFSET_SHIFT) << 2)) + +#define QED_SECTION_SIZE(_offsize) ((((_offsize) & \ + OFFSIZE_SIZE_MASK) >> \ + OFFSIZE_SIZE_SHIFT) << 2) + +#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \ + SECTION_OFFSET((_offsize)) + \ + (QED_SECTION_SIZE((_offsize)) * (idx))) + +#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \ + ((_pub_base) + offsetof(struct mcp_public_data, sections[_section])) + +/* PHY configuration */ +struct eth_phy_cfg { + u32 speed; +#define ETH_SPEED_AUTONEG 0x0 +#define ETH_SPEED_SMARTLINQ 0x8 + + u32 pause; +#define ETH_PAUSE_NONE 0x0 +#define ETH_PAUSE_AUTONEG 0x1 +#define ETH_PAUSE_RX 0x2 +#define ETH_PAUSE_TX 0x4 + + u32 adv_speed; + + u32 loopback_mode; +#define ETH_LOOPBACK_NONE 0x0 +#define ETH_LOOPBACK_INT_PHY 0x1 +#define ETH_LOOPBACK_EXT_PHY 0x2 +#define ETH_LOOPBACK_EXT 0x3 +#define ETH_LOOPBACK_MAC 0x4 +#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5 +#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6 +#define ETH_LOOPBACK_PCS_AH_ONLY 0x7 +#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8 +#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9 + + u32 eee_cfg; +#define EEE_CFG_EEE_ENABLED BIT(0) +#define EEE_CFG_TX_LPI BIT(1) +#define EEE_CFG_ADV_SPEED_1G BIT(2) +#define EEE_CFG_ADV_SPEED_10G BIT(3) +#define EEE_TX_TIMER_USEC_MASK 0xfffffff0 +#define EEE_TX_TIMER_USEC_OFFSET 4 +#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00 +#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 +#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 + + u32 link_modes; + + u32 fec_mode; +#define FEC_FORCE_MODE_MASK 0x000000ff +#define FEC_FORCE_MODE_OFFSET 0 +#define FEC_FORCE_MODE_NONE 0x00 +#define FEC_FORCE_MODE_FIRECODE 0x01 +#define FEC_FORCE_MODE_RS 0x02 +#define FEC_FORCE_MODE_AUTO 0x07 +#define FEC_EXTENDED_MODE_MASK 0xffffff00 +#define FEC_EXTENDED_MODE_OFFSET 8 +#define ETH_EXT_FEC_NONE 0x00000000 +#define ETH_EXT_FEC_10G_NONE 0x00000100 +#define ETH_EXT_FEC_10G_BASE_R 0x00000200 +#define ETH_EXT_FEC_25G_NONE 0x00000400 +#define ETH_EXT_FEC_25G_BASE_R 0x00000800 +#define ETH_EXT_FEC_25G_RS528 0x00001000 +#define ETH_EXT_FEC_40G_NONE 0x00002000 +#define ETH_EXT_FEC_40G_BASE_R 0x00004000 +#define ETH_EXT_FEC_50G_NONE 0x00008000 +#define ETH_EXT_FEC_50G_BASE_R 0x00010000 +#define ETH_EXT_FEC_50G_RS528 0x00020000 +#define ETH_EXT_FEC_50G_RS544 0x00040000 +#define ETH_EXT_FEC_100G_NONE 0x00080000 +#define ETH_EXT_FEC_100G_BASE_R 0x00100000 +#define ETH_EXT_FEC_100G_RS528 0x00200000 +#define ETH_EXT_FEC_100G_RS544 0x00400000 + + u32 extended_speed; +#define ETH_EXT_SPEED_MASK 0x0000ffff +#define ETH_EXT_SPEED_OFFSET 0 +#define ETH_EXT_SPEED_NONE 0x00000001 +#define ETH_EXT_SPEED_1G 0x00000002 +#define ETH_EXT_SPEED_10G 0x00000004 
+#define ETH_EXT_SPEED_25G 0x00000008 +#define ETH_EXT_SPEED_40G 0x00000010 +#define ETH_EXT_SPEED_50G_BASE_R 0x00000020 +#define ETH_EXT_SPEED_50G_BASE_R2 0x00000040 +#define ETH_EXT_SPEED_100G_BASE_R2 0x00000080 +#define ETH_EXT_SPEED_100G_BASE_R4 0x00000100 +#define ETH_EXT_SPEED_100G_BASE_P4 0x00000200 +#define ETH_EXT_ADV_SPEED_MASK 0xFFFF0000 +#define ETH_EXT_ADV_SPEED_OFFSET 16 +#define ETH_EXT_ADV_SPEED_1G 0x00010000 +#define ETH_EXT_ADV_SPEED_10G 0x00020000 +#define ETH_EXT_ADV_SPEED_25G 0x00040000 +#define ETH_EXT_ADV_SPEED_40G 0x00080000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00100000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00200000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x00400000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x00800000 +#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x01000000 +}; + +struct port_mf_cfg { + u32 dynamic_cfg; +#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff +#define PORT_MF_CFG_OV_TAG_SHIFT 0 +#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK + + u32 reserved[1]; +}; + +struct eth_stats { + u64 r64; + u64 r127; + u64 r255; + u64 r511; + u64 r1023; + u64 r1518; + + union { + struct { + u64 r1522; + u64 r2047; + u64 r4095; + u64 r9216; + u64 r16383; + } bb0; + struct { + u64 unused1; + u64 r1519_to_max; + u64 unused2; + u64 unused3; + u64 unused4; + } ah0; + } u0; + + u64 rfcs; + u64 rxcf; + u64 rxpf; + u64 rxpp; + u64 raln; + u64 rfcr; + u64 rovr; + u64 rjbr; + u64 rund; + u64 rfrg; + u64 t64; + u64 t127; + u64 t255; + u64 t511; + u64 t1023; + u64 t1518; + + union { + struct { + u64 t2047; + u64 t4095; + u64 t9216; + u64 t16383; + } bb1; + struct { + u64 t1519_to_max; + u64 unused6; + u64 unused7; + u64 unused8; + } ah1; + } u1; + + u64 txpf; + u64 txpp; + + union { + struct { + u64 tlpiec; + u64 tncl; + } bb2; + struct { + u64 unused9; + u64 unused10; + } ah2; + } u2; + + u64 rbyte; + u64 rxuca; + u64 rxmca; + u64 rxbca; + u64 rxpok; + u64 tbyte; + u64 txuca; + u64 txmca; + u64 txbca; + u64 txcf; +}; + +struct pkt_type_cnt { + u64 tc_tx_pkt_cnt[8]; + u64 tc_tx_oct_cnt[8]; + u64 priority_rx_pkt_cnt[8]; + u64 priority_rx_oct_cnt[8]; +}; + +struct brb_stats { + u64 brb_truncate[8]; + u64 brb_discard[8]; +}; + +struct port_stats { + struct brb_stats brb; + struct eth_stats eth; +}; + +struct couple_mode_teaming { + u8 port_cmt[MCP_GLOB_PORT_MAX]; +#define PORT_CMT_IN_TEAM BIT(0) + +#define PORT_CMT_PORT_ROLE BIT(1) +#define PORT_CMT_PORT_INACTIVE (0 << 1) +#define PORT_CMT_PORT_ACTIVE BIT(1) + +#define PORT_CMT_TEAM_MASK BIT(2) +#define PORT_CMT_TEAM0 (0 << 2) +#define PORT_CMT_TEAM1 BIT(2) +}; + +#define LLDP_CHASSIS_ID_STAT_LEN 4 +#define LLDP_PORT_ID_STAT_LEN 4 +#define DCBX_MAX_APP_PROTOCOL 32 +#define MAX_SYSTEM_LLDP_TLV_DATA 32 +#define MAX_TLV_BUFFER 128 + +enum _lldp_agent { + LLDP_NEAREST_BRIDGE = 0, + LLDP_NEAREST_NON_TPMR_BRIDGE, + LLDP_NEAREST_CUSTOMER_BRIDGE, + LLDP_MAX_LLDP_AGENTS +}; + +struct lldp_config_params_s { + u32 config; +#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff +#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 +#define LLDP_CONFIG_HOLD_MASK 0x00000f00 +#define LLDP_CONFIG_HOLD_SHIFT 8 +#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 +#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 +#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 +#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 +#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 +#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 + u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; +}; + +struct lldp_status_params_s { + u32 prefix_seq_num; + u32 status; + u32 
peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; + u32 suffix_seq_num; +}; + +struct dcbx_ets_feature { + u32 flags; +#define DCBX_ETS_ENABLED_MASK 0x00000001 +#define DCBX_ETS_ENABLED_SHIFT 0 +#define DCBX_ETS_WILLING_MASK 0x00000002 +#define DCBX_ETS_WILLING_SHIFT 1 +#define DCBX_ETS_ERROR_MASK 0x00000004 +#define DCBX_ETS_ERROR_SHIFT 2 +#define DCBX_ETS_CBS_MASK 0x00000008 +#define DCBX_ETS_CBS_SHIFT 3 +#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 +#define DCBX_ETS_MAX_TCS_SHIFT 4 +#define DCBX_OOO_TC_MASK 0x00000f00 +#define DCBX_OOO_TC_SHIFT 8 + u32 pri_tc_tbl[1]; +#define DCBX_TCP_OOO_TC (4) +#define DCBX_TCP_OOO_K2_4PORT_TC (3) + +#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1) +#define DCBX_CEE_STRICT_PRIORITY 0xf + u32 tc_bw_tbl[2]; + u32 tc_tsa_tbl[2]; +#define DCBX_ETS_TSA_STRICT 0 +#define DCBX_ETS_TSA_CBS 1 +#define DCBX_ETS_TSA_ETS 2 +}; + +#define DCBX_TCP_OOO_TC (4) +#define DCBX_TCP_OOO_K2_4PORT_TC (3) + +struct dcbx_app_priority_entry { + u32 entry; +#define DCBX_APP_PRI_MAP_MASK 0x000000ff +#define DCBX_APP_PRI_MAP_SHIFT 0 +#define DCBX_APP_PRI_0 0x01 +#define DCBX_APP_PRI_1 0x02 +#define DCBX_APP_PRI_2 0x04 +#define DCBX_APP_PRI_3 0x08 +#define DCBX_APP_PRI_4 0x10 +#define DCBX_APP_PRI_5 0x20 +#define DCBX_APP_PRI_6 0x40 +#define DCBX_APP_PRI_7 0x80 +#define DCBX_APP_SF_MASK 0x00000300 +#define DCBX_APP_SF_SHIFT 8 +#define DCBX_APP_SF_ETHTYPE 0 +#define DCBX_APP_SF_PORT 1 +#define DCBX_APP_SF_IEEE_MASK 0x0000f000 +#define DCBX_APP_SF_IEEE_SHIFT 12 +#define DCBX_APP_SF_IEEE_RESERVED 0 +#define DCBX_APP_SF_IEEE_ETHTYPE 1 +#define DCBX_APP_SF_IEEE_TCP_PORT 2 +#define DCBX_APP_SF_IEEE_UDP_PORT 3 +#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 + +#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 +#define DCBX_APP_PROTOCOL_ID_SHIFT 16 +}; + +struct dcbx_app_priority_feature { + u32 flags; +#define DCBX_APP_ENABLED_MASK 0x00000001 +#define DCBX_APP_ENABLED_SHIFT 0 +#define DCBX_APP_WILLING_MASK 0x00000002 +#define DCBX_APP_WILLING_SHIFT 1 +#define DCBX_APP_ERROR_MASK 0x00000004 +#define DCBX_APP_ERROR_SHIFT 2 +#define DCBX_APP_MAX_TCS_MASK 0x0000f000 +#define DCBX_APP_MAX_TCS_SHIFT 12 +#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 +#define DCBX_APP_NUM_ENTRIES_SHIFT 16 + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +struct dcbx_features { + struct dcbx_ets_feature ets; + u32 pfc; +#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff +#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 + +#define DCBX_PFC_FLAGS_MASK 0x0000ff00 +#define DCBX_PFC_FLAGS_SHIFT 8 +#define DCBX_PFC_CAPS_MASK 0x00000f00 +#define DCBX_PFC_CAPS_SHIFT 8 +#define DCBX_PFC_MBC_MASK 0x00004000 +#define DCBX_PFC_MBC_SHIFT 14 +#define DCBX_PFC_WILLING_MASK 0x00008000 +#define DCBX_PFC_WILLING_SHIFT 15 +#define DCBX_PFC_ENABLED_MASK 0x00010000 +#define DCBX_PFC_ENABLED_SHIFT 16 +#define DCBX_PFC_ERROR_MASK 0x00020000 +#define DCBX_PFC_ERROR_SHIFT 17 + + struct dcbx_app_priority_feature app; +}; + +struct dcbx_local_params { + u32 config; +#define DCBX_CONFIG_VERSION_MASK 0x00000007 +#define DCBX_CONFIG_VERSION_SHIFT 0 +#define DCBX_CONFIG_VERSION_DISABLED 0 +#define DCBX_CONFIG_VERSION_IEEE 1 +#define DCBX_CONFIG_VERSION_CEE 2 
+#define DCBX_CONFIG_VERSION_STATIC 4 + + u32 flags; + struct dcbx_features features; +}; + +struct dcbx_mib { + u32 prefix_seq_num; + u32 flags; + struct dcbx_features features; + u32 suffix_seq_num; +}; + +struct lldp_system_tlvs_buffer_s { + u32 flags; +#define LLDP_SYSTEM_TLV_VALID_MASK 0x1 +#define LLDP_SYSTEM_TLV_VALID_OFFSET 0 +#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2 +#define LLDP_SYSTEM_TLV_MANDATORY_SHIFT 1 +#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000 +#define LLDP_SYSTEM_TLV_LENGTH_SHIFT 16 + u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; +}; + +struct lldp_received_tlvs_s { + u32 prefix_seq_num; + u32 length; + u32 tlvs_buffer[MAX_TLV_BUFFER]; + u32 suffix_seq_num; +}; + +struct dcb_dscp_map { + u32 flags; +#define DCB_DSCP_ENABLE_MASK 0x1 +#define DCB_DSCP_ENABLE_SHIFT 0 +#define DCB_DSCP_ENABLE 1 + u32 dscp_pri_map[8]; +}; + +struct mcp_val64 { + u32 lo; + u32 hi; +}; + +struct generic_idc_msg_s { + u32 source_pf; + struct mcp_val64 msg; +}; + +struct pcie_stats_stc { + u32 sr_cnt_wr_byte_msb; + u32 sr_cnt_wr_byte_lsb; + u32 sr_cnt_wr_cnt; + u32 sr_cnt_rd_byte_msb; + u32 sr_cnt_rd_byte_lsb; + u32 sr_cnt_rd_cnt; +}; + +enum _attribute_commands_e { + ATTRIBUTE_CMD_READ = 0, + ATTRIBUTE_CMD_WRITE, + ATTRIBUTE_CMD_READ_CLEAR, + ATTRIBUTE_CMD_CLEAR, + ATTRIBUTE_NUM_OF_COMMANDS +}; + +struct public_global { + u32 max_path; + u32 max_ports; +#define MODE_1P 1 +#define MODE_2P 2 +#define MODE_3P 3 +#define MODE_4P 4 + u32 debug_mb_offset; + u32 phymod_dbg_mb_offset; + struct couple_mode_teaming cmt; + s32 internal_temperature; + u32 mfw_ver; + u32 running_bundle_id; + s32 external_temperature; + u32 mdump_reason; + u32 ext_phy_upgrade_fw; + u8 runtime_port_swap_map[MODE_4P]; + u32 data_ptr; + u32 data_size; + u32 bmb_error_status_cnt; + u32 bmb_jumbo_frame_cnt; + u32 sent_to_bmc_cnt; + u32 handled_by_mfw; + u32 sent_to_nw_cnt; + u32 to_bmc_kb_per_second; + u32 bcast_dropped_to_bmc_cnt; + u32 mcast_dropped_to_bmc_cnt; + u32 ucast_dropped_to_bmc_cnt; + u32 ncsi_response_failure_cnt; + u32 device_attr; + u32 vpd_warning; +}; + +struct fw_flr_mb { + u32 aggint; + u32 opgen_addr; + u32 accum_ack; +}; + +struct public_path { + struct fw_flr_mb flr_mb; + u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; + + u32 process_kill; +#define PROCESS_KILL_COUNTER_MASK 0x0000ffff +#define PROCESS_KILL_COUNTER_SHIFT 0 +#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 +#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 +#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) ((aeu_reg_id) * 32 + (aeu_bit)) +}; + +#define FC_NPIV_WWPN_SIZE 8 +#define FC_NPIV_WWNN_SIZE 8 +struct dci_npiv_settings { + u8 npiv_wwpn[FC_NPIV_WWPN_SIZE]; + u8 npiv_wwnn[FC_NPIV_WWNN_SIZE]; +}; + +struct dci_fc_npiv_cfg { + /* hdr used internally by the MFW */ + u32 hdr; + u32 num_of_npiv; +}; + +#define MAX_NUMBER_NPIV 64 +struct dci_fc_npiv_tbl { + struct dci_fc_npiv_cfg fc_npiv_cfg; + struct dci_npiv_settings settings[MAX_NUMBER_NPIV]; +}; + +struct pause_flood_monitor { + u8 period_cnt; + u8 any_brb_prs_packet_hist; + u8 any_brb_block_is_full_hist; + u8 flags; + u32 num_of_state_changes; +}; + +struct public_port { + u32 validity_map; + + u32 link_status; +#define LINK_STATUS_LINK_UP 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) 
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 +#define LINK_STATUS_PFC_ENABLED 0x00000100 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 +#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 +#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 +#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 +#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 +#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000 +#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) +#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18) +#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) +#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) +#define LINK_STATUS_SFP_TX_FAULT 0x00100000 +#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 +#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 +#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 +#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 +#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 +#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 + +#define LINK_STATUS_FEC_MODE_MASK 0x38000000 +#define LINK_STATUS_FEC_MODE_NONE (0 << 27) +#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 BIT(27) +#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27) +#define LINK_STATUS_EXT_PHY_LINK_UP BIT(30) + + u32 link_status1; + u32 ext_phy_fw_version; + u32 drv_phy_cfg_addr; + + u32 port_stx; + + u32 stat_nig_timer; + + struct port_mf_cfg port_mf_config; + struct port_stats stats; + + u32 media_type; +#define MEDIA_UNSPECIFIED 0x0 +#define MEDIA_SFPP_10G_FIBER 0x1 +#define MEDIA_XFP_FIBER 0x2 +#define MEDIA_DA_TWINAX 0x3 +#define MEDIA_BASE_T 0x4 +#define MEDIA_SFP_1G_FIBER 0x5 +#define MEDIA_MODULE_FIBER 0x6 +#define MEDIA_KR 0xf0 +#define MEDIA_NOT_PRESENT 0xff + + u32 lfa_status; + u32 link_change_count; + + struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; + + /* DCBX related MIB */ + struct dcbx_local_params local_admin_dcbx_mib; + struct dcbx_mib remote_dcbx_mib; + struct dcbx_mib operational_dcbx_mib; + + u32 fc_npiv_nvram_tbl_addr; + u32 fc_npiv_nvram_tbl_size; + + u32 transceiver_data; +#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff +#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 +#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 +#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 +#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 +#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 +#define ETH_TRANSCEIVER_STATE_IN_SETUP 0x10 +#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00 +#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8 +#define ETH_TRANSCEIVER_TYPE_NONE 0x00 +#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff +#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 +#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 +#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 +#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 +#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 +#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 +#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 +#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 +#define 
ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 +#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a +#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b +#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c +#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d +#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e +#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f +#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 +#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 +#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 +#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 +#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 +#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 +#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 +#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 +#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a +#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b +#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c +#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d +#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e +#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f +#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 +#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 +#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a + + u32 wol_info; + u32 wol_pkt_len; + u32 wol_pkt_details; + struct dcb_dscp_map dcb_dscp_map; + + u32 eee_status; +#define EEE_ACTIVE_BIT BIT(0) +#define EEE_LD_ADV_STATUS_MASK 0x000000f0 +#define EEE_LD_ADV_STATUS_OFFSET 4 +#define EEE_1G_ADV BIT(1) +#define EEE_10G_ADV BIT(2) +#define EEE_LP_ADV_STATUS_MASK 0x00000f00 +#define EEE_LP_ADV_STATUS_OFFSET 8 +#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 +#define EEE_SUPPORTED_SPEED_OFFSET 12 +#define EEE_1G_SUPPORTED BIT(1) +#define EEE_10G_SUPPORTED BIT(2) + + u32 eee_remote; +#define EEE_REMOTE_TW_TX_MASK 0x0000ffff +#define EEE_REMOTE_TW_TX_OFFSET 0 +#define EEE_REMOTE_TW_RX_MASK 0xffff0000 +#define EEE_REMOTE_TW_RX_OFFSET 16 + + u32 module_info; + + u32 oem_cfg_port; +#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 +#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 +#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 +#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 +#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C +#define OEM_CFG_SCHED_TYPE_OFFSET 2 +#define OEM_CFG_SCHED_TYPE_ETS 0x1 +#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 + + struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS]; + u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA]; + u32 phy_module_temperature; + u32 nig_reg_stat_rx_bmb_packet; + u32 nig_reg_rx_llh_ncsi_mcp_mask; + u32 nig_reg_rx_llh_ncsi_mcp_mask_2; + struct pause_flood_monitor pause_flood_monitor; + u32 nig_drain_cnt; + struct pkt_type_cnt pkt_tc_priority_cnt; +}; + +#define MCP_DRV_VER_STR_SIZE 16 +#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) +#define MCP_DRV_NVM_BUF_LEN 32 +struct drv_version_stc { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +struct public_func { + u32 iscsi_boot_signature; + u32 iscsi_boot_block_offset; + + u32 mtu_size; + + u32 c2s_pcp_map_lower; + 
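The eee_status word in struct public_port above is split into sub-fields, and the EEE_1G_*/EEE_10G_* bit names are evidently relative to the extracted sub-field rather than to the raw word (the same BIT(1)/BIT(2) values serve the local-advertised, partner-advertised and supported fields). A hedged sketch of how the link-partner advertisement might be checked; the helper name is invented:

/* Sketch only: the *_ADV/*_SUPPORTED bit names apply after the sub-field has
 * been shifted down to bit 0, not to the raw eee_status word.
 */
static bool eee_lp_advertises_10g(u32 eee_status)
{
	u32 lp_adv = (eee_status & EEE_LP_ADV_STATUS_MASK) >>
		     EEE_LP_ADV_STATUS_OFFSET;

	return lp_adv & EEE_10G_ADV;
}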
u32 c2s_pcp_map_upper; + u32 c2s_pcp_map_default; + + struct generic_idc_msg_s generic_idc_msg; + + u32 num_of_msix; + + u32 config; +#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 + +#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 +#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 +#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 +#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 +#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 +#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 +#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 + +#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 +#define FUNC_MF_CFG_MIN_BW_SHIFT 8 +#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 +#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 +#define FUNC_MF_CFG_MAX_BW_SHIFT 16 +#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 + + u32 status; +#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 + + u32 mac_upper; +#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff +#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 +#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; +#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 ovlan_stag; +#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff +#define FUNC_MF_CFG_OV_STAG_SHIFT 0 +#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK + + u32 pf_allocation; + + u32 preserve_data; + + u32 driver_last_activity_ts; + + u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; + + u32 drv_id; +#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff +#define DRV_ID_PDA_COMP_VER_SHIFT 0 + +#define LOAD_REQ_HSI_VERSION 2 +#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 +#define DRV_ID_MCP_HSI_VER_SHIFT 16 +#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \ + DRV_ID_MCP_HSI_VER_SHIFT) + +#define DRV_ID_DRV_TYPE_MASK 0x7f000000 +#define DRV_ID_DRV_TYPE_SHIFT 24 +#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT) + +#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 +#define DRV_ID_DRV_INIT_HW_SHIFT 31 +#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT) + + u32 oem_cfg_func; +#define OEM_CFG_FUNC_TC_MASK 0x0000000F +#define OEM_CFG_FUNC_TC_OFFSET 0 +#define OEM_CFG_FUNC_TC_0 0x0 +#define OEM_CFG_FUNC_TC_1 0x1 +#define OEM_CFG_FUNC_TC_2 0x2 +#define OEM_CFG_FUNC_TC_3 0x3 +#define OEM_CFG_FUNC_TC_4 0x4 +#define OEM_CFG_FUNC_TC_5 0x5 +#define OEM_CFG_FUNC_TC_6 0x6 +#define OEM_CFG_FUNC_TC_7 0x7 + +#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 + + struct drv_version_stc drv_ver; +}; + +struct mcp_mac { + u32 mac_upper; + u32 mac_lower; +}; + +struct mcp_file_att { + u32 nvm_start_addr; + u32 len; +}; + +struct bist_nvm_image_att { + u32 return_code; + u32 image_type; + u32 nvm_start_addr; + u32 len; +}; + +struct lan_stats_stc { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; + u32 rserved; +}; + +struct fcoe_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + +struct iscsi_stats_stc { + u64 rx_pdus; + u64 tx_pdus; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct rdma_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct ocbb_data_stc { + u32 ocbb_host_addr; + u32 ocsd_host_addr; + u32 ocsd_req_update_interval; +}; + +struct fcoe_cap_stc { + u32 
max_ios; + u32 max_log; + u32 max_exch; + u32 max_npiv; + u32 max_tgt; + u32 max_outstnd; +}; + +#define MAX_NUM_OF_SENSORS 7 +struct temperature_status_stc { + u32 num_of_sensors; + u32 sensor[MAX_NUM_OF_SENSORS]; +}; + +/* crash dump configuration header */ +struct mdump_config_stc { + u32 version; + u32 config; + u32 epoc; + u32 num_of_logs; + u32 valid_logs; +}; + +enum resource_id_enum { + RESOURCE_NUM_SB_E = 0, + RESOURCE_NUM_L2_QUEUE_E = 1, + RESOURCE_NUM_VPORT_E = 2, + RESOURCE_NUM_VMQ_E = 3, + RESOURCE_FACTOR_NUM_RSS_PF_E = 4, + RESOURCE_FACTOR_RSS_PER_VF_E = 5, + RESOURCE_NUM_RL_E = 6, + RESOURCE_NUM_PQ_E = 7, + RESOURCE_NUM_VF_E = 8, + RESOURCE_VFC_FILTER_E = 9, + RESOURCE_ILT_E = 10, + RESOURCE_CQS_E = 11, + RESOURCE_GFT_PROFILES_E = 12, + RESOURCE_NUM_TC_E = 13, + RESOURCE_NUM_RSS_ENGINES_E = 14, + RESOURCE_LL2_QUEUE_E = 15, + RESOURCE_RDMA_STATS_QUEUE_E = 16, + RESOURCE_BDQ_E = 17, + RESOURCE_QCN_E = 18, + RESOURCE_LLH_FILTER_E = 19, + RESOURCE_VF_MAC_ADDR = 20, + RESOURCE_LL2_CQS_E = 21, + RESOURCE_VF_CNQS = 22, + RESOURCE_MAX_NUM, + RESOURCE_NUM_INVALID = 0xFFFFFFFF +}; + +/* Resource ID is to be filled by the driver in the MB request + * Size, offset & flags to be filled by the MFW in the MB response + */ +struct resource_info { + enum resource_id_enum res_id; + u32 size; /* number of allocated resources */ + u32 offset; /* Offset of the 1st resource */ + u32 vf_size; + u32 vf_offset; + u32 flags; +#define RESOURCE_ELEMENT_STRICT BIT(0) +}; + +struct mcp_wwn { + u32 wwn_upper; + u32 wwn_lower; +}; + +#define DRV_ROLE_NONE 0 +#define DRV_ROLE_PREBOOT 1 +#define DRV_ROLE_OS 2 +#define DRV_ROLE_KDUMP 3 + +struct load_req_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_REQ_ROLE_MASK 0x000000FF +#define LOAD_REQ_ROLE_SHIFT 0 +#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00 +#define LOAD_REQ_LOCK_TO_SHIFT 8 +#define LOAD_REQ_LOCK_TO_DEFAULT 0 +#define LOAD_REQ_LOCK_TO_NONE 255 +#define LOAD_REQ_FORCE_MASK 0x000F0000 +#define LOAD_REQ_FORCE_SHIFT 16 +#define LOAD_REQ_FORCE_NONE 0 +#define LOAD_REQ_FORCE_PF 1 +#define LOAD_REQ_FORCE_ALL 2 +#define LOAD_REQ_FLAGS0_MASK 0x00F00000 +#define LOAD_REQ_FLAGS0_SHIFT 20 +#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0) +}; + +struct load_rsp_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_RSP_ROLE_MASK 0x000000FF +#define LOAD_RSP_ROLE_SHIFT 0 +#define LOAD_RSP_HSI_MASK 0x0000FF00 +#define LOAD_RSP_HSI_SHIFT 8 +#define LOAD_RSP_FLAGS0_MASK 0x000F0000 +#define LOAD_RSP_FLAGS0_SHIFT 16 +#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0) +}; + +struct mdump_retain_data_stc { + u32 valid; + u32 epoch; + u32 pf; + u32 status; +}; + +struct attribute_cmd_write_stc { + u32 val; + u32 mask; + u32 offset; +}; + +struct lldp_stats_stc { + u32 tx_frames_total; + u32 rx_frames_total; + u32 rx_frames_discarded; + u32 rx_age_outs; +}; + +struct get_att_ctrl_stc { + u32 disabled_attns; + u32 controllable_attns; +}; + +struct trace_filter_stc { + u32 level; + u32 modules; +}; + +union drv_union_data { + struct mcp_mac wol_mac; + + struct eth_phy_cfg drv_phy_cfg; + + struct mcp_val64 val64; + + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + + struct mcp_file_att file_att; + + u32 ack_vf_disabled[EXT_VF_BITMAP_SIZE_IN_DWORDS]; + + struct drv_version_stc drv_version; + + struct lan_stats_stc lan_stats; + struct fcoe_stats_stc fcoe_stats; + struct iscsi_stats_stc iscsi_stats; + struct rdma_stats_stc rdma_stats; + struct ocbb_data_stc ocbb_info; + struct temperature_status_stc temp_info; + struct resource_info 
resource; + struct bist_nvm_image_att nvm_image_att; + struct mdump_config_stc mdump_config; + struct mcp_mac lldp_mac; + struct mcp_wwn fcoe_fabric_name; + u32 dword; + + struct load_req_stc load_req; + struct load_rsp_stc load_rsp; + struct mdump_retain_data_stc mdump_retain; + struct attribute_cmd_write_stc attribute_cmd_write; + struct lldp_stats_stc lldp_stats; + struct pcie_stats_stc pcie_stats; + + struct get_att_ctrl_stc get_att_ctrl; + struct fcoe_cap_stc fcoe_cap; + struct trace_filter_stc trace_filter; +}; + +struct public_drv_mb { + u32 drv_mb_header; +#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define DRV_MSG_SEQ_NUMBER_OFFSET 0 +#define DRV_MSG_CODE_MASK 0xffff0000 +#define DRV_MSG_CODE_OFFSET 16 + + u32 drv_mb_param; + + u32 fw_mb_header; +#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define FW_MSG_SEQ_NUMBER_OFFSET 0 +#define FW_MSG_CODE_MASK 0xffff0000 +#define FW_MSG_CODE_OFFSET 16 + + u32 fw_mb_param; + + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + + u32 mcp_pulse_mb; +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + + union drv_union_data union_data; +}; + +#define DRV_MSG_CODE(_code_) ((_code_) << DRV_MSG_CODE_OFFSET) +enum drv_msg_code_enum { + DRV_MSG_CODE_NVM_PUT_FILE_BEGIN = DRV_MSG_CODE(0x0001), + DRV_MSG_CODE_NVM_PUT_FILE_DATA = DRV_MSG_CODE(0x0002), + DRV_MSG_CODE_NVM_GET_FILE_ATT = DRV_MSG_CODE(0x0003), + DRV_MSG_CODE_NVM_READ_NVRAM = DRV_MSG_CODE(0x0005), + DRV_MSG_CODE_NVM_WRITE_NVRAM = DRV_MSG_CODE(0x0006), + DRV_MSG_CODE_MCP_RESET = DRV_MSG_CODE(0x0009), + DRV_MSG_CODE_SET_VERSION = DRV_MSG_CODE(0x000f), + DRV_MSG_CODE_MCP_HALT = DRV_MSG_CODE(0x0010), + DRV_MSG_CODE_SET_VMAC = DRV_MSG_CODE(0x0011), + DRV_MSG_CODE_GET_VMAC = DRV_MSG_CODE(0x0012), + DRV_MSG_CODE_GET_STATS = DRV_MSG_CODE(0x0013), + DRV_MSG_CODE_TRANSCEIVER_READ = DRV_MSG_CODE(0x0016), + DRV_MSG_CODE_MASK_PARITIES = DRV_MSG_CODE(0x001a), + DRV_MSG_CODE_BIST_TEST = DRV_MSG_CODE(0x001e), + DRV_MSG_CODE_SET_LED_MODE = DRV_MSG_CODE(0x0020), + DRV_MSG_CODE_RESOURCE_CMD = DRV_MSG_CODE(0x0023), + DRV_MSG_CODE_MDUMP_CMD = DRV_MSG_CODE(0x0025), + DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL = DRV_MSG_CODE(0x002b), + DRV_MSG_CODE_OS_WOL = DRV_MSG_CODE(0x002e), + DRV_MSG_CODE_GET_TLV_DONE = DRV_MSG_CODE(0x002f), + DRV_MSG_CODE_FEATURE_SUPPORT = DRV_MSG_CODE(0x0030), + DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT = DRV_MSG_CODE(0x0031), + DRV_MSG_CODE_GET_ENGINE_CONFIG = DRV_MSG_CODE(0x0037), + DRV_MSG_CODE_GET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003e), + DRV_MSG_CODE_SET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003f), + DRV_MSG_CODE_INITIATE_PF_FLR = DRV_MSG_CODE(0x0201), + DRV_MSG_CODE_LOAD_REQ = DRV_MSG_CODE(0x1000), + DRV_MSG_CODE_LOAD_DONE = DRV_MSG_CODE(0x1100), + DRV_MSG_CODE_INIT_HW = DRV_MSG_CODE(0x1200), + DRV_MSG_CODE_CANCEL_LOAD_REQ = DRV_MSG_CODE(0x1300), + DRV_MSG_CODE_UNLOAD_REQ = DRV_MSG_CODE(0x2000), + DRV_MSG_CODE_UNLOAD_DONE = DRV_MSG_CODE(0x2100), + DRV_MSG_CODE_INIT_PHY = DRV_MSG_CODE(0x2200), + DRV_MSG_CODE_LINK_RESET = DRV_MSG_CODE(0x2300), + DRV_MSG_CODE_SET_DCBX = DRV_MSG_CODE(0x2500), + DRV_MSG_CODE_OV_UPDATE_CURR_CFG = DRV_MSG_CODE(0x2600), + DRV_MSG_CODE_OV_UPDATE_BUS_NUM = DRV_MSG_CODE(0x2700), + DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS = DRV_MSG_CODE(0x2800), + DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER = DRV_MSG_CODE(0x2900), + DRV_MSG_CODE_NIG_DRAIN = DRV_MSG_CODE(0x3000), + 
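In struct public_drv_mb above, drv_mb_header carries the command in its upper 16 bits (the DRV_MSG_CODE_* values in this enum are already shifted into place) and a rolling sequence number in the lower 16, which the MFW echoes back in fw_mb_header. A small sketch of composing a header and checking the echoed sequence; both helper names are invented for illustration:

/* Illustration only: build a mailbox header from one of the DRV_MSG_CODE_*
 * values and a driver-side sequence number, and verify that the firmware
 * response echoes the same sequence.
 */
static u32 drv_mb_compose_header(u32 cmd, u16 seq)
{
	return (cmd & DRV_MSG_CODE_MASK) | (seq & DRV_MSG_SEQ_NUMBER_MASK);
}

static bool fw_mb_seq_matches(u32 fw_mb_header, u16 seq)
{
	return (fw_mb_header & FW_MSG_SEQ_NUMBER_MASK) == seq;
}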
DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE = DRV_MSG_CODE(0x3100), + DRV_MSG_CODE_BW_UPDATE_ACK = DRV_MSG_CODE(0x3200), + DRV_MSG_CODE_OV_UPDATE_MTU = DRV_MSG_CODE(0x3300), + DRV_MSG_GET_RESOURCE_ALLOC_MSG = DRV_MSG_CODE(0x3400), + DRV_MSG_SET_RESOURCE_VALUE_MSG = DRV_MSG_CODE(0x3500), + DRV_MSG_CODE_OV_UPDATE_WOL = DRV_MSG_CODE(0x3800), + DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE = DRV_MSG_CODE(0x3900), + DRV_MSG_CODE_S_TAG_UPDATE_ACK = DRV_MSG_CODE(0x3b00), + DRV_MSG_CODE_GET_OEM_UPDATES = DRV_MSG_CODE(0x4100), + DRV_MSG_CODE_GET_PPFID_BITMAP = DRV_MSG_CODE(0x4300), + DRV_MSG_CODE_VF_DISABLED_DONE = DRV_MSG_CODE(0xc000), + DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001), + DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002), + DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004), +}; + +#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 +#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30 +#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 +#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 +#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 + +/* DRV_MSG_CODE_RETAIN_VMAC parameters */ +#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_SHIFT 0 +#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_MASK 0xf + +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_SHIFT 4 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_MASK 0x70 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_L2 0 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_ISCSI 1 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_FCOE 2 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWNN 3 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWPN 4 + +#define DRV_MSG_CODE_MCP_RESET_FORCE 0xf04ce + +#define DRV_MSG_CODE_STATS_TYPE_LAN 1 +#define DRV_MSG_CODE_STATS_TYPE_FCOE 2 +#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 +#define DRV_MSG_CODE_STATS_TYPE_RDMA 4 + +#define BW_MAX_MASK 0x000000ff +#define BW_MAX_OFFSET 0 +#define BW_MIN_MASK 0x0000ff00 +#define BW_MIN_OFFSET 8 + +#define DRV_MSG_FAN_FAILURE_TYPE BIT(0) +#define DRV_MSG_TEMPERATURE_FAILURE_TYPE BIT(1) + +#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F +#define RESOURCE_CMD_REQ_RESC_SHIFT 0 +#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0 +#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5 +#define RESOURCE_OPCODE_REQ 1 +#define RESOURCE_OPCODE_REQ_WO_AGING 2 +#define RESOURCE_OPCODE_REQ_W_AGING 3 +#define RESOURCE_OPCODE_RELEASE 4 +#define RESOURCE_OPCODE_FORCE_RELEASE 5 +#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00 +#define RESOURCE_CMD_REQ_AGE_SHIFT 8 + +#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF +#define RESOURCE_CMD_RSP_OWNER_SHIFT 0 +#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700 +#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8 +#define RESOURCE_OPCODE_GNT 1 +#define RESOURCE_OPCODE_BUSY 2 +#define RESOURCE_OPCODE_RELEASED 3 +#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4 +#define RESOURCE_OPCODE_WRONG_OWNER 5 +#define RESOURCE_OPCODE_UNKNOWN_CMD 255 + +#define RESOURCE_DUMP 0 + +/* DRV_MSG_CODE_MDUMP_CMD parameters */ +#define MDUMP_DRV_PARAM_OPCODE_MASK 0x000000ff +#define DRV_MSG_CODE_MDUMP_ACK 0x01 +#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02 +#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03 +#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04 +#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05 +#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 +#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 +#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 + +#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a + +#define DRV_MSG_CODE_MDUMP_FREE_DRIVER_BUF 0x0b +#define DRV_MSG_CODE_MDUMP_GEN_LINK_DUMP 0x0c +#define DRV_MSG_CODE_MDUMP_GEN_IDLE_CHK 0x0d + +/* DRV_MSG_CODE_MDUMP_CMD options */ +#define MDUMP_DRV_PARAM_OPTION_MASK 0x00000f00 +#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_OFFSET 8 +#define 
DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_MASK 0x100 + +/* DRV_MSG_CODE_EXT_PHY_READ/DRV_MSG_CODE_EXT_PHY_WRITE parameters */ +#define DRV_MB_PARAM_ADDR_SHIFT 0 +#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF +#define DRV_MB_PARAM_DEVAD_SHIFT 16 +#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000 +#define DRV_MB_PARAM_PORT_SHIFT 21 +#define DRV_MB_PARAM_PORT_MASK 0x00600000 + +/* DRV_MSG_CODE_PMBUS_READ/DRV_MSG_CODE_PMBUS_WRITE parameters */ +#define DRV_MB_PARAM_PMBUS_CMD_SHIFT 0 +#define DRV_MB_PARAM_PMBUS_CMD_MASK 0xFF +#define DRV_MB_PARAM_PMBUS_LEN_SHIFT 8 +#define DRV_MB_PARAM_PMBUS_LEN_MASK 0x300 +#define DRV_MB_PARAM_PMBUS_DATA_SHIFT 16 +#define DRV_MB_PARAM_PMBUS_DATA_MASK 0xFFFF0000 + +/* UNLOAD_REQ params */ +#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 +#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 + +/* UNLOAD_DONE_params */ +#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001 + +/* INIT_PHY params */ +#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001 +#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002 + +/* LLDP / DCBX params*/ +#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 +#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006 +#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1 +#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_SHIFT 0 +#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0 +#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_SHIFT 4 +#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008 +#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 +#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_MASK 0x00000010 +#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_SHIFT 4 + +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0 + +#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_MASK 0x000000ff +#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_SHIFT 0 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2 + +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 +#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0 +#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF +#define DRV_MB_PARAM_NVM_LEN_OFFSET 24 +#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 + +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 + +#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0 +#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F +#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0 +#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 +#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 +#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 + +#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF +#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 +#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF + +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 + +#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0 +#define 
DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF + +#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \ + DRV_MB_PARAM_WOL_DISABLED | \ + DRV_MB_PARAM_WOL_ENABLED) +#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP +#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED +#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED + +#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \ + DRV_MB_PARAM_ESWITCH_MODE_VEB | \ + DRV_MB_PARAM_ESWITCH_MODE_VEPA) +#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0 +#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 +#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 + +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 + +#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 +#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 +#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + +#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000 + + /* Resource Allocation params - Driver version support */ +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + +#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0 +#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 +#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 +#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 +#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 + +#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 +#define DRV_MB_PARAM_BIST_RC_PASSED 1 +#define DRV_MB_PARAM_BIST_RC_FAILED 2 +#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 + +#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 +#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00 + +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008 +#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 + +/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ +#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0 +#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff + +/* Driver attributes params */ +#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0 +#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff +#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24 +#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000 + +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_IGNORE 0x0000ffff +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 +#define 
DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_SHIFT 21 +#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_MASK 0x00200000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 + +/*DRV_MSG_CODE_GET_PERM_MAC parametres*/ +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_SHIFT 0 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MASK 0xF +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_PF 0 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_BMC 1 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_VF 2 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_LLDP 3 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MAX 4 +#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_SHIFT 8 +#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_MASK 0xFFFF00 + +#define FW_MSG_CODE(_code_) ((_code_) << FW_MSG_CODE_OFFSET) +enum fw_msg_code_enum { + FW_MSG_CODE_UNSUPPORTED = FW_MSG_CODE(0x0000), + FW_MSG_CODE_NVM_OK = FW_MSG_CODE(0x0001), + FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK = FW_MSG_CODE(0x0040), + FW_MSG_CODE_PHY_OK = FW_MSG_CODE(0x0011), + FW_MSG_CODE_OK = FW_MSG_CODE(0x0016), + FW_MSG_CODE_ERROR = FW_MSG_CODE(0x0017), + FW_MSG_CODE_TRANSCEIVER_DIAG_OK = FW_MSG_CODE(0x0016), + FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT = FW_MSG_CODE(0x0002), + FW_MSG_CODE_MDUMP_INVALID_CMD = FW_MSG_CODE(0x0003), + FW_MSG_CODE_OS_WOL_SUPPORTED = FW_MSG_CODE(0x0080), + FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE = FW_MSG_CODE(0x0087), + FW_MSG_CODE_DRV_LOAD_ENGINE = FW_MSG_CODE(0x1010), + FW_MSG_CODE_DRV_LOAD_PORT = FW_MSG_CODE(0x1011), + FW_MSG_CODE_DRV_LOAD_FUNCTION = FW_MSG_CODE(0x1012), + FW_MSG_CODE_DRV_LOAD_REFUSED_PDA = FW_MSG_CODE(0x1020), + FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 = FW_MSG_CODE(0x1021), + FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG = FW_MSG_CODE(0x1022), + FW_MSG_CODE_DRV_LOAD_REFUSED_HSI = FW_MSG_CODE(0x1023), + FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE = FW_MSG_CODE(0x1030), + FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT = FW_MSG_CODE(0x1031), + FW_MSG_CODE_DRV_LOAD_DONE = FW_MSG_CODE(0x1110), + FW_MSG_CODE_DRV_UNLOAD_ENGINE = FW_MSG_CODE(0x2011), + FW_MSG_CODE_DRV_UNLOAD_PORT = FW_MSG_CODE(0x2012), + FW_MSG_CODE_DRV_UNLOAD_FUNCTION = FW_MSG_CODE(0x2013), + FW_MSG_CODE_DRV_UNLOAD_DONE = FW_MSG_CODE(0x2110), + FW_MSG_CODE_RESOURCE_ALLOC_OK = FW_MSG_CODE(0x3400), + FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN = FW_MSG_CODE(0x3500), + FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE = FW_MSG_CODE(0x3b00), + FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE = FW_MSG_CODE(0xb001), + FW_MSG_CODE_DEBUG_NOT_ENABLED = FW_MSG_CODE(0xb00a), + FW_MSG_CODE_DEBUG_DATA_SEND_OK = FW_MSG_CODE(0xb00b), +}; + +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + +/* Get PF RDMA protocol command response */ +#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 +#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 +#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 +#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 + +/* Get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0) +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1) +#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO BIT(2) +#define FW_MB_PARAM_FEATURE_SUPPORT_LP_PRES_DET BIT(3) +#define 
FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD BIT(4) +#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5) +#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6) +#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP BIT(7) +#define FW_MB_PARAM_FEATURE_SUPPORT_VF_DPM BIT(8) +#define FW_MB_PARAM_FEATURE_SUPPORT_IDLE_CHK BIT(9) +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16) +#define FW_MB_PARAM_FEATURE_SUPPORT_DISABLE_LLDP BIT(17) +#define FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK BIT(18) +#define FW_MB_PARAM_FEATURE_SUPPORT_RESTORE_DEFAULT_CFG BIT(19) + +#define FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED 0x00000001 + +#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) + +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 + +#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff +#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 + +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 + +enum MFW_DRV_MSG_TYPE { + MFW_DRV_MSG_LINK_CHANGE, + MFW_DRV_MSG_FLR_FW_ACK_FAILED, + MFW_DRV_MSG_VF_DISABLED, + MFW_DRV_MSG_LLDP_DATA_UPDATED, + MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, + MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, + MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_BW_UPDATE, + MFW_DRV_MSG_S_TAG_UPDATE, + MFW_DRV_MSG_GET_LAN_STATS, + MFW_DRV_MSG_GET_FCOE_STATS, + MFW_DRV_MSG_GET_ISCSI_STATS, + MFW_DRV_MSG_GET_RDMA_STATS, + MFW_DRV_MSG_FAILURE_DETECTED, + MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, + MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED, + MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE, + MFW_DRV_MSG_GET_TLV_REQ, + MFW_DRV_MSG_OEM_CFG_UPDATE, + MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED, + MFW_DRV_MSG_GENERIC_IDC, + MFW_DRV_MSG_XCVR_TX_FAULT, + MFW_DRV_MSG_XCVR_RX_LOS, + MFW_DRV_MSG_GET_FCOE_CAP, + MFW_DRV_MSG_GEN_LINK_DUMP, + MFW_DRV_MSG_GEN_IDLE_CHK, + MFW_DRV_MSG_DCBX_ADMIN_CFG_APPLIED, + MFW_DRV_MSG_MAX +}; + +#define MFW_DRV_MSG_MAX_DWORDS(msgs) ((((msgs) - 1) >> 2) + 1) +#define MFW_DRV_MSG_DWORD(msg_id) ((msg_id) >> 2) +#define MFW_DRV_MSG_OFFSET(msg_id) (((msg_id) & 0x3) << 3) +#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) + +struct public_mfw_mb { + u32 sup_msgs; + u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; + u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; +}; + +enum public_sections { + PUBLIC_DRV_MB, + PUBLIC_MFW_MB, + PUBLIC_GLOBAL, + PUBLIC_PATH, + PUBLIC_PORT, + PUBLIC_FUNC, + PUBLIC_MAX_SECTIONS +}; + +struct drv_ver_info_stc { + u32 ver; + u8 name[32]; +}; + +/* Runtime data needs about 1/2K. We use 2K to be on the safe side. + * Please make sure data does not exceed this size. 
+ */ +#define NUM_RUNTIME_DWORDS 16 +struct drv_init_hw_stc { + u32 init_hw_bitmask[NUM_RUNTIME_DWORDS]; + u32 init_hw_data[NUM_RUNTIME_DWORDS * 32]; +}; + +struct mcp_public_data { + u32 num_sections; + u32 sections[PUBLIC_MAX_SECTIONS]; + struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; + struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; + struct public_global global; + struct public_path path[MCP_GLOB_PATH_MAX]; + struct public_port port[MCP_GLOB_PORT_MAX]; + struct public_func func[MCP_GLOB_FUNC_MAX]; +}; + +#define I2C_TRANSCEIVER_ADDR 0xa0 +#define MAX_I2C_TRANSACTION_SIZE 16 +#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256 + +/* OCBB definitions */ +enum tlvs { + /* Category 1: Device Properties */ + DRV_TLV_CLP_STR, + DRV_TLV_CLP_STR_CTD, + /* Category 6: Device Configuration */ + DRV_TLV_SCSI_TO, + DRV_TLV_R_T_TOV, + DRV_TLV_R_A_TOV, + DRV_TLV_E_D_TOV, + DRV_TLV_CR_TOV, + DRV_TLV_BOOT_TYPE, + /* Category 8: Port Configuration */ + DRV_TLV_NPIV_ENABLED, + /* Category 10: Function Configuration */ + DRV_TLV_FEATURE_FLAGS, + DRV_TLV_LOCAL_ADMIN_ADDR, + DRV_TLV_ADDITIONAL_MAC_ADDR_1, + DRV_TLV_ADDITIONAL_MAC_ADDR_2, + DRV_TLV_LSO_MAX_OFFLOAD_SIZE, + DRV_TLV_LSO_MIN_SEGMENT_COUNT, + DRV_TLV_PROMISCUOUS_MODE, + DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG, + DRV_TLV_FLEX_NIC_OUTER_VLAN_ID, + DRV_TLV_OS_DRIVER_STATES, + DRV_TLV_PXE_BOOT_PROGRESS, + /* Category 12: FC/FCoE Configuration */ + DRV_TLV_NPIV_STATE, + DRV_TLV_NUM_OF_NPIV_IDS, + DRV_TLV_SWITCH_NAME, + DRV_TLV_SWITCH_PORT_NUM, + DRV_TLV_SWITCH_PORT_ID, + DRV_TLV_VENDOR_NAME, + DRV_TLV_SWITCH_MODEL, + DRV_TLV_SWITCH_FW_VER, + DRV_TLV_QOS_PRIORITY_PER_802_1P, + DRV_TLV_PORT_ALIAS, + DRV_TLV_PORT_STATE, + DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_LINK_FAILURE_COUNT, + DRV_TLV_FCOE_BOOT_PROGRESS, + /* Category 13: iSCSI Configuration */ + DRV_TLV_TARGET_LLMNR_ENABLED, + DRV_TLV_HEADER_DIGEST_FLAG_ENABLED, + DRV_TLV_DATA_DIGEST_FLAG_ENABLED, + DRV_TLV_AUTHENTICATION_METHOD, + DRV_TLV_ISCSI_BOOT_TARGET_PORTAL, + DRV_TLV_MAX_FRAME_SIZE, + DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_ISCSI_BOOT_PROGRESS, + /* Category 20: Device Data */ + DRV_TLV_PCIE_BUS_RX_UTILIZATION, + DRV_TLV_PCIE_BUS_TX_UTILIZATION, + DRV_TLV_DEVICE_CPU_CORES_UTILIZATION, + DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED, + DRV_TLV_NCSI_RX_BYTES_RECEIVED, + DRV_TLV_NCSI_TX_BYTES_SENT, + /* Category 22: Base Port Data */ + DRV_TLV_RX_DISCARDS, + DRV_TLV_RX_ERRORS, + DRV_TLV_TX_ERRORS, + DRV_TLV_TX_DISCARDS, + DRV_TLV_RX_FRAMES_RECEIVED, + DRV_TLV_TX_FRAMES_SENT, + /* Category 23: FC/FCoE Port Data */ + DRV_TLV_RX_BROADCAST_PACKETS, + DRV_TLV_TX_BROADCAST_PACKETS, + /* Category 28: Base Function Data */ + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4, + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6, + DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_PF_RX_FRAMES_RECEIVED, + DRV_TLV_RX_BYTES_RECEIVED, + DRV_TLV_PF_TX_FRAMES_SENT, + DRV_TLV_TX_BYTES_SENT, + DRV_TLV_IOV_OFFLOAD, + DRV_TLV_PCI_ERRORS_CAP_ID, + DRV_TLV_UNCORRECTABLE_ERROR_STATUS, + DRV_TLV_UNCORRECTABLE_ERROR_MASK, + DRV_TLV_CORRECTABLE_ERROR_STATUS, + DRV_TLV_CORRECTABLE_ERROR_MASK, + DRV_TLV_PCI_ERRORS_AECC_REGISTER, + DRV_TLV_TX_QUEUES_EMPTY, + DRV_TLV_RX_QUEUES_EMPTY, + DRV_TLV_TX_QUEUES_FULL, + DRV_TLV_RX_QUEUES_FULL, + /* Category 29: FC/FCoE Function Data */ + DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + 
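The MFW_DRV_MSG_DWORD/OFFSET/MASK macros further up give every MFW_DRV_MSG_* notification an 8-bit slot inside the msg[] and ack[] arrays of struct public_mfw_mb. A hedged sketch (not the driver's actual handler) of detecting an outstanding notification by comparing the message byte against its acknowledgement shadow; in practice these words live in MCP scratchpad memory and are read through the driver's register accessors rather than plain pointer dereferences:

/* Example only: a notification is pending while its byte in msg[] differs
 * from the byte last written back into ack[].
 */
static bool mfw_msg_pending(const struct public_mfw_mb *mb, u32 msg_id)
{
	u32 diff = mb->msg[MFW_DRV_MSG_DWORD(msg_id)] ^
		   mb->ack[MFW_DRV_MSG_DWORD(msg_id)];

	return diff & MFW_DRV_MSG_MASK(msg_id);
}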
DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_FRAMES_RECEIVED, + DRV_TLV_FCOE_RX_BYTES_RECEIVED, + DRV_TLV_FCOE_TX_FRAMES_SENT, + DRV_TLV_FCOE_TX_BYTES_SENT, + DRV_TLV_CRC_ERROR_COUNT, + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_1_TIMESTAMP, + DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_2_TIMESTAMP, + DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_3_TIMESTAMP, + DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_4_TIMESTAMP, + DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_5_TIMESTAMP, + DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT, + DRV_TLV_LOSS_OF_SIGNAL_ERRORS, + DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT, + DRV_TLV_DISPARITY_ERROR_COUNT, + DRV_TLV_CODE_VIOLATION_ERROR_COUNT, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_TIMESTAMP, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP, + DRV_TLV_LAST_FLOGI_RJT, + DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP, + DRV_TLV_FDISCS_SENT_COUNT, + DRV_TLV_FDISC_ACCS_RECEIVED, + DRV_TLV_FDISC_RJTS_RECEIVED, + DRV_TLV_PLOGI_SENT_COUNT, + DRV_TLV_PLOGI_ACCS_RECEIVED, + DRV_TLV_PLOGI_RJTS_RECEIVED, + DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_1_TIMESTAMP, + DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_2_TIMESTAMP, + DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_3_TIMESTAMP, + DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_4_TIMESTAMP, + DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_5_TIMESTAMP, + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_1_ACC_TIMESTAMP, + DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_2_ACC_TIMESTAMP, + DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_3_ACC_TIMESTAMP, + DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_4_ACC_TIMESTAMP, + DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_5_ACC_TIMESTAMP, + DRV_TLV_LOGOS_ISSUED, + DRV_TLV_LOGO_ACCS_RECEIVED, + DRV_TLV_LOGO_RJTS_RECEIVED, + DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_1_TIMESTAMP, + DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_2_TIMESTAMP, + DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_3_TIMESTAMP, + DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_4_TIMESTAMP, + DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_5_TIMESTAMP, + DRV_TLV_LOGOS_RECEIVED, + DRV_TLV_ACCS_ISSUED, + DRV_TLV_PRLIS_ISSUED, + DRV_TLV_ACCS_RECEIVED, + DRV_TLV_ABTS_SENT_COUNT, + DRV_TLV_ABTS_ACCS_RECEIVED, + DRV_TLV_ABTS_RJTS_RECEIVED, + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_1_TIMESTAMP, + DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_2_TIMESTAMP, + DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_3_TIMESTAMP, + DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_4_TIMESTAMP, + DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_5_TIMESTAMP, + DRV_TLV_RSCNS_RECEIVED, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4, + DRV_TLV_LUN_RESETS_ISSUED, + DRV_TLV_ABORT_TASK_SETS_ISSUED, + DRV_TLV_TPRLOS_SENT, + DRV_TLV_NOS_SENT_COUNT, + DRV_TLV_NOS_RECEIVED_COUNT, + 
DRV_TLV_OLS_COUNT, + DRV_TLV_LR_COUNT, + DRV_TLV_LRR_COUNT, + DRV_TLV_LIP_SENT_COUNT, + DRV_TLV_LIP_RECEIVED_COUNT, + DRV_TLV_EOFA_COUNT, + DRV_TLV_EOFNI_COUNT, + DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT, + DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_BUSY_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT, + DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT, + DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT, + DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT, + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_1_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_2_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_3_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_4_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_5_TIMESTAMP, + /* Category 30: iSCSI Function Data */ + DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED, + DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED, + DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT, + DRV_TLV_ISCSI_PDU_TX_BYTES_SENT, + DRV_TLV_RDMA_DRV_VERSION +}; + +#define I2C_DEV_ADDR_A2 0xa2 +#define SFP_EEPROM_A2_TEMPERATURE_ADDR 0x60 +#define SFP_EEPROM_A2_TEMPERATURE_SIZE 2 +#define SFP_EEPROM_A2_VCC_ADDR 0x62 +#define SFP_EEPROM_A2_VCC_SIZE 2 +#define SFP_EEPROM_A2_TX_BIAS_ADDR 0x64 +#define SFP_EEPROM_A2_TX_BIAS_SIZE 2 +#define SFP_EEPROM_A2_TX_POWER_ADDR 0x66 +#define SFP_EEPROM_A2_TX_POWER_SIZE 2 +#define SFP_EEPROM_A2_RX_POWER_ADDR 0x68 +#define SFP_EEPROM_A2_RX_POWER_SIZE 2 + +#define I2C_DEV_ADDR_A0 0xa0 +#define QSFP_EEPROM_A0_TEMPERATURE_ADDR 0x16 +#define QSFP_EEPROM_A0_TEMPERATURE_SIZE 2 +#define QSFP_EEPROM_A0_VCC_ADDR 0x1a +#define QSFP_EEPROM_A0_VCC_SIZE 2 +#define QSFP_EEPROM_A0_TX1_BIAS_ADDR 0x2a +#define QSFP_EEPROM_A0_TX1_BIAS_SIZE 2 +#define QSFP_EEPROM_A0_TX1_POWER_ADDR 0x32 +#define QSFP_EEPROM_A0_TX1_POWER_SIZE 2 +#define QSFP_EEPROM_A0_RX1_POWER_ADDR 0x22 +#define QSFP_EEPROM_A0_RX1_POWER_SIZE 2 + +struct nvm_cfg_mac_address { + u32 mac_addr_hi; +#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff +#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 + + u32 mac_addr_lo; +}; + +struct nvm_cfg1_glob { + u32 generic_cont0; +#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0 +#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 +#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 +#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 +#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 +#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 +#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 +#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 + + u32 engineering_change[3]; + u32 manufacturing_id; + u32 serial_number[4]; + u32 pcie_cfg; + u32 mgmt_traffic; + + u32 core_cfg; +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd +#define 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15 + + u32 e_lane_cfg1; + u32 e_lane_cfg2; + u32 f_lane_cfg1; + u32 f_lane_cfg2; + u32 mps10_preemphasis; + u32 mps10_driver_current; + u32 mps25_preemphasis; + u32 mps25_driver_current; + u32 pci_id; + u32 pci_subsys_id; + u32 bar; + u32 mps10_txfir_main; + u32 mps10_txfir_post; + u32 mps25_txfir_main; + u32 mps25_txfir_post; + u32 manufacture_ver; + u32 manufacture_time; + u32 led_global_settings; + u32 generic_cont1; + + u32 mbi_version; +#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff +#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 +#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00 +#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 +#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000 +#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 + + u32 mbi_date; + u32 misc_sig; + + u32 device_capabilities; +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10 + + u32 power_dissipated; + u32 power_consumed; + u32 efi_version; + u32 multi_network_modes_capability; + u32 nvm_cfg_version; + u32 nvm_cfg_new_option_seq; + u32 nvm_cfg_removed_option_seq; + u32 nvm_cfg_updated_value_seq; + u32 extended_serial_number[8]; + u32 option_kit_pn[8]; + u32 spare_pn[8]; + u32 mps25_active_txfir_pre; + u32 mps25_active_txfir_main; + u32 mps25_active_txfir_post; + u32 features; + u32 tx_rx_eq_25g_hlpc; + u32 tx_rx_eq_25g_llpc; + u32 tx_rx_eq_25g_ac; + u32 tx_rx_eq_10g_pc; + u32 tx_rx_eq_10g_ac; + u32 tx_rx_eq_1g; + u32 tx_rx_eq_25g_bt; + u32 tx_rx_eq_10g_bt; + u32 generic_cont4; + u32 preboot_debug_mode_std; + u32 preboot_debug_mode_ext; + u32 ext_phy_cfg1; + u32 clocks; + u32 pre2_generic_cont_1; + u32 pre2_generic_cont_2; + u32 pre2_generic_cont_3; + u32 tx_rx_eq_50g_hlpc; + u32 tx_rx_eq_50g_mlpc; + u32 tx_rx_eq_50g_llpc; + u32 tx_rx_eq_50g_ac; + u32 trace_modules; + u32 pcie_class_code_fcoe; + u32 pcie_class_code_iscsi; + u32 no_provisioned_mac; + u32 lowest_mbi_version; + u32 generic_cont5; + u32 pre2_generic_cont_4; + u32 reserved[40]; +}; + +struct nvm_cfg1_path { + u32 reserved[1]; +}; + +struct nvm_cfg1_port { + u32 rel_to_opt123; + u32 rel_to_opt124; + + u32 generic_cont0; +#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000 +#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 +#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 +#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 +#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 + + u32 pcie_cfg; + u32 features; + + u32 speed_cap_mask; +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 + + u32 link_settings; +#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f +#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7 + + u32 phy_cfg; + u32 mgmt_traffic; + + u32 ext_phy; + /* EEE power saving mode */ +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 + + u32 mba_cfg1; + u32 mba_cfg2; + u32 vf_cfg; + struct nvm_cfg_mac_address lldp_mac_address; + u32 led_port_settings; + u32 transceiver_00; + u32 device_ids; + + u32 board_cfg; +#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff +#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 +#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 +#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 +#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 + + u32 mnm_10g_cap; + u32 mnm_10g_ctrl; + u32 mnm_10g_misc; + u32 mnm_25g_cap; + u32 mnm_25g_ctrl; + u32 mnm_25g_misc; + u32 mnm_40g_cap; + u32 mnm_40g_ctrl; + u32 mnm_40g_misc; + u32 mnm_50g_cap; + u32 mnm_50g_ctrl; + u32 mnm_50g_misc; + u32 mnm_100g_cap; + u32 mnm_100g_ctrl; + u32 mnm_100g_misc; + + u32 temperature; + u32 ext_phy_cfg1; + + u32 extended_speed; +#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff +#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16 +#define 
NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400 + + u32 extended_fec_mode; + u32 port_generic_cont_01; + u32 port_generic_cont_02; + u32 phy_temp_monitor; + u32 reserved[109]; +}; + +struct nvm_cfg1_func { + struct nvm_cfg_mac_address mac_address; + u32 rsrv1; + u32 rsrv2; + u32 device_id; + u32 cmn_cfg; + u32 pci_cfg; + struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; + struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; + u32 preboot_generic_cfg; + u32 features; + u32 mf_mode_feature; + u32 reserved[6]; +}; + +struct nvm_cfg1 { + struct nvm_cfg1_glob glob; + struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; + struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; + struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; +}; + +struct board_info { + u16 vendor_id; + u16 eth_did_suffix; + u16 sub_vendor_id; + u16 sub_device_id; + char *board_name; + char *friendly_name; +}; + +struct trace_module_info { + char *module_name; +}; + +#define NUM_TRACE_MODULES 25 + +enum nvm_cfg_sections { + NVM_CFG_SECTION_NVM_CFG1, + NVM_CFG_SECTION_MAX +}; + +struct nvm_cfg { + u32 num_sections; + u32 sections_offset[NVM_CFG_SECTION_MAX]; + struct nvm_cfg1 cfg1; +}; + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_2 2 +#define PORT_3 3 + +extern struct spad_layout g_spad; +struct spad_layout { + struct nvm_cfg nvm_cfg; + struct mcp_public_data public_data; +}; + +#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */ + +#define SPAD_OFFSET(addr) (((u32)(addr) - (u32)CPU_SPAD_BASE)) + +#define TO_OFFSIZE(_offset, _size) \ + ((u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_OFFSET) | \ + (((u32)(_size) >> 2) << OFFSIZE_SIZE_OFFSET))) + +enum spad_sections { + SPAD_SECTION_TRACE, + SPAD_SECTION_NVM_CFG, + SPAD_SECTION_PUBLIC, + SPAD_SECTION_PRIVATE, + SPAD_SECTION_MAX +}; + +#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + \ + __builtin_offsetof(struct static_init, f)) + +/* This section is located at a fixed location in the beginning of the + * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade. + * All the rest of data has a floating location which differs from version to + * version, and is pointed by the mcp_meta_data below. + * Moreover, the spad_layout section is part of the MFW firmware, and is loaded + * with it from nvram in order to clear this portion. 
+ */ +struct static_init { + u32 num_sections; + offsize_t sections[SPAD_SECTION_MAX]; +#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_])))) + + u32 tim_hash[8]; +#define PRESERVED_TIM_HASH ((u8 *)(STRUCT_OFFSET(tim_hash))) + u32 tpu_hash[8]; +#define PRESERVED_TPU_HASH ((u8 *)(STRUCT_OFFSET(tpu_hash))) + u32 secure_pcie_fw_ver; +#define SECURE_PCIE_FW_VER (*((u32 *)(STRUCT_OFFSET(secure_pcie_fw_ver)))) + u32 secure_running_mfw; +#define SECURE_RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(secure_running_mfw)))) + struct mcp_trace trace; +}; + +#define CRC_MAGIC_VALUE 0xDEBB20E3 +#define CRC32_POLYNOMIAL 0xEDB88320 +#define _KB(x) ((x) * 1024) +#define _MB(x) (_KB(x) * 1024) +#define NVM_CRC_SIZE (sizeof(u32)) +enum nvm_sw_arbitrator { + NVM_SW_ARB_HOST, + NVM_SW_ARB_MCP, + NVM_SW_ARB_UART, + NVM_SW_ARB_RESERVED +}; + +struct legacy_bootstrap_region { + u32 magic_value; +#define NVM_MAGIC_VALUE 0x669955aa + u32 sram_start_addr; + u32 code_len; + u32 code_start_addr; + u32 crc; +}; + +struct nvm_code_entry { + u32 image_type; + u32 nvm_start_addr; + u32 len; + u32 sram_start_addr; + u32 sram_run_addr; +}; + +enum nvm_image_type { + NVM_TYPE_TIM1 = 0x01, + NVM_TYPE_TIM2 = 0x02, + NVM_TYPE_MIM1 = 0x03, + NVM_TYPE_MIM2 = 0x04, + NVM_TYPE_MBA = 0x05, + NVM_TYPE_MODULES_PN = 0x06, + NVM_TYPE_VPD = 0x07, + NVM_TYPE_MFW_TRACE1 = 0x08, + NVM_TYPE_MFW_TRACE2 = 0x09, + NVM_TYPE_NVM_CFG1 = 0x0a, + NVM_TYPE_L2B = 0x0b, + NVM_TYPE_DIR1 = 0x0c, + NVM_TYPE_EAGLE_FW1 = 0x0d, + NVM_TYPE_FALCON_FW1 = 0x0e, + NVM_TYPE_PCIE_FW1 = 0x0f, + NVM_TYPE_HW_SET = 0x10, + NVM_TYPE_LIM = 0x11, + NVM_TYPE_AVS_FW1 = 0x12, + NVM_TYPE_DIR2 = 0x13, + NVM_TYPE_CCM = 0x14, + NVM_TYPE_EAGLE_FW2 = 0x15, + NVM_TYPE_FALCON_FW2 = 0x16, + NVM_TYPE_PCIE_FW2 = 0x17, + NVM_TYPE_AVS_FW2 = 0x18, + NVM_TYPE_INIT_HW = 0x19, + NVM_TYPE_DEFAULT_CFG = 0x1a, + NVM_TYPE_MDUMP = 0x1b, + NVM_TYPE_NVM_META = 0x1c, + NVM_TYPE_ISCSI_CFG = 0x1d, + NVM_TYPE_FCOE_CFG = 0x1f, + NVM_TYPE_ETH_PHY_FW1 = 0x20, + NVM_TYPE_ETH_PHY_FW2 = 0x21, + NVM_TYPE_BDN = 0x22, + NVM_TYPE_8485X_PHY_FW = 0x23, + NVM_TYPE_PUB_KEY = 0x24, + NVM_TYPE_RECOVERY = 0x25, + NVM_TYPE_PLDM = 0x26, + NVM_TYPE_UPK1 = 0x27, + NVM_TYPE_UPK2 = 0x28, + NVM_TYPE_MASTER_KC = 0x29, + NVM_TYPE_BACKUP_KC = 0x2a, + NVM_TYPE_HW_DUMP = 0x2b, + NVM_TYPE_HW_DUMP_OUT = 0x2c, + NVM_TYPE_BIN_NVM_META = 0x30, + NVM_TYPE_ROM_TEST = 0xf0, + NVM_TYPE_88X33X0_PHY_FW = 0x31, + NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32, + NVM_TYPE_IDLE_CHK = 0x33, + NVM_TYPE_MAX, +}; + +#define MAX_NVM_DIR_ENTRIES 100 + +struct nvm_dir_meta { + u32 dir_id; + u32 nvm_dir_addr; + u32 num_images; + u32 next_mfw_to_run; +}; + +struct nvm_dir { + s32 seq; +#define NVM_DIR_NEXT_MFW_MASK 0x00000001 +#define NVM_DIR_SEQ_MASK 0xfffffffe +#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK) +#define NVM_DIR_UPDATE_SEQ(_seq, swap_mfw)\ + ({ \ + _seq = (((_seq + 2) & \ + NVM_DIR_SEQ_MASK) | \ + (NVM_DIR_NEXT_MFW(_seq ^ (swap_mfw))));\ + }) + +#define IS_DIR_SEQ_VALID(seq) (((seq) & NVM_DIR_SEQ_MASK) != \ + NVM_DIR_SEQ_MASK) + + u32 num_images; + u32 rsrv; + struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */ +}; + +#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \ + ((_num_images) - 1) *\ + sizeof(struct nvm_code_entry) +\ + NVM_CRC_SIZE) + +struct nvm_vpd_image { + u32 format_revision; +#define VPD_IMAGE_VERSION 1 + + u8 vpd_data[1]; +}; + +#define DIR_ID_1 (0) +#define DIR_ID_2 (1) +#define MAX_DIR_IDS (2) + +#define MFW_BUNDLE_1 (0) +#define MFW_BUNDLE_2 (1) +#define MAX_MFW_BUNDLES (2) + 
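
The nvm_dir sequence macros above pack the "next MFW bundle to run" flag into bit 0 of the sequence word while stepping the counter itself in increments of two. The following stand-alone sketch is not driver code: it simply mirrors NVM_DIR_NEXT_MFW()/NVM_DIR_UPDATE_SEQ() with a helper whose name (nvm_dir_update_seq) is hypothetical, to show the arithmetic an NVM-update flow would perform on the directory header.

    /* Minimal sketch, assuming the macro values copied from the defines
     * above; the helper name is illustrative only.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NVM_DIR_NEXT_MFW_MASK 0x00000001
    #define NVM_DIR_SEQ_MASK      0xfffffffe
    #define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)

    /* Bump the even-stepping sequence counter and optionally flip the
     * "next MFW bundle" bit, as NVM_DIR_UPDATE_SEQ() does in place. */
    static uint32_t nvm_dir_update_seq(uint32_t seq, uint32_t swap_mfw)
    {
            return ((seq + 2) & NVM_DIR_SEQ_MASK) |
                   NVM_DIR_NEXT_MFW(seq ^ swap_mfw);
    }

    int main(void)
    {
            uint32_t seq = 0x10;    /* counter 8, currently on bundle 0 */

            seq = nvm_dir_update_seq(seq, 1);  /* switch to the other bundle */
            printf("seq 0x%x, next mfw %u\n", seq, NVM_DIR_NEXT_MFW(seq));
            /* prints: seq 0x13, next mfw 1 */
            return 0;
    }

Because the counter advances by two, bit 0 never aliases with the count, which is why IS_DIR_SEQ_VALID() can treat an all-ones sequence field as the "erased flash" marker.
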
+#define FLASH_PAGE_SIZE 0x1000 +#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) +#define LEGACY_ASIC_MIM_MAX_SIZE (_KB(1200)) + +#define FPGA_MIM_MAX_SIZE (0x40000) + +#define LIM_MAX_SIZE ((2 * FLASH_PAGE_SIZE) - \ + sizeof(struct legacy_bootstrap_region) \ + - NVM_RSV_SIZE) +#define LIM_OFFSET (NVM_OFFSET(lim_image)) +#define NVM_RSV_SIZE (44) +#define GET_MIM_MAX_SIZE(is_asic, is_e4) (LEGACY_ASIC_MIM_MAX_SIZE) +#define GET_MIM_OFFSET(idx, is_asic, is_e4) (NVM_OFFSET(dir[MAX_MFW_BUNDLES])\ + + (((idx) == NVM_TYPE_MIM2) ? \ + GET_MIM_MAX_SIZE(is_asic, is_e4)\ + : 0)) +#define GET_NVM_FIXED_AREA_SIZE(is_asic, is_e4) (sizeof(struct nvm_image) + \ + GET_MIM_MAX_SIZE(is_asic,\ + is_e4) * 2) + +union nvm_dir_union { + struct nvm_dir dir; + u8 page[FLASH_PAGE_SIZE]; +}; + +struct nvm_image { + struct legacy_bootstrap_region bootstrap; + u8 rsrv[NVM_RSV_SIZE]; + u8 lim_image[LIM_MAX_SIZE]; + union nvm_dir_union dir[MAX_MFW_BUNDLES]; +}; + +#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->(f))))) + +struct hw_set_info { + u32 reg_type; +#define GRC_REG_TYPE 1 +#define PHY_REG_TYPE 2 +#define PCI_REG_TYPE 4 + + u32 bank_num; + u32 pf_num; + u32 operation; +#define READ_OP 1 +#define WRITE_OP 2 +#define RMW_SET_OP 3 +#define RMW_CLR_OP 4 + + u32 reg_addr; + u32 reg_data; + + u32 reset_type; +#define POR_RESET_TYPE BIT(0) +#define HARD_RESET_TYPE BIT(1) +#define CORE_RESET_TYPE BIT(2) +#define MCP_RESET_TYPE BIT(3) +#define PERSET_ASSERT BIT(4) +#define PERSET_DEASSERT BIT(5) +}; + +struct hw_set_image { + u32 format_version; +#define HW_SET_IMAGE_VERSION 1 + u32 no_hw_sets; + struct hw_set_info hw_sets[1]; +}; + +#define MAX_SUPPORTED_NVM_OPTIONS 1000 + +#define NVM_META_BIN_OPTION_OFFSET_MASK 0x0000ffff +#define NVM_META_BIN_OPTION_OFFSET_SHIFT 0 +#define NVM_META_BIN_OPTION_LEN_MASK 0x00ff0000 +#define NVM_META_BIN_OPTION_LEN_OFFSET 16 +#define NVM_META_BIN_OPTION_ENTITY_MASK 0x03000000 +#define NVM_META_BIN_OPTION_ENTITY_SHIFT 24 +#define NVM_META_BIN_OPTION_ENTITY_GLOB 0 +#define NVM_META_BIN_OPTION_ENTITY_PORT 1 +#define NVM_META_BIN_OPTION_ENTITY_FUNC 2 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_MASK 0x0c000000 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_SHIFT 26 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_USER 0 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_FIXED 1 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_FORCED 2 + +struct nvm_meta_bin_t { + u32 magic; +#define NVM_META_BIN_MAGIC 0x669955bb + u32 version; +#define NVM_META_BIN_VERSION 1 + u32 num_options; + u32 options[0]; +}; +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 4f4b79250a2b..7f3e84b8622d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -22,6 +22,7 @@ #include "qed.h" #include "qed_cxt.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" @@ -33,7 +34,6 @@ #include "qed_roce.h" #include "qed_sp.h" - int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 max_count, char *name) { @@ -865,8 +865,8 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) } qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; - addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num); + addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_COMMON_QUEUE_CONS, qz_num); REG_WR16(p_hwfn, addr, prod); @@ -1903,7 +1903,6 @@ void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt 
*p_ptt) val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); } - void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { p_hwfn->db_bar_no_edpm = true; @@ -1966,7 +1965,7 @@ static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, u8 *old_mac_address, - u8 *new_mac_address) + const u8 *new_mac_address) { int rc = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 6a1de3a25257..2753723011dd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -168,16 +168,19 @@ static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp) return false; } + #if IS_ENABLED(CONFIG_QED_RDMA) void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn); void qed_rdma_info_free(struct qed_hwfn *p_hwfn); #else -static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} +static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) {} static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} -static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;} +static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) + {return -EINVAL; } static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {} #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index da1b7fdcbda7..6f1a52e6beb2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -126,6 +126,8 @@ 0x1009c4UL #define QM_REG_PF_EN \ 0x2f2ea4UL +#define QM_REG_RLGLBLUPPERBOUND \ + 0x2f3c00UL #define TCFC_REG_WEAK_ENABLE_VF \ 0x2d0704UL #define TCFC_REG_STRONG_ENABLE_PF \ @@ -576,7 +578,7 @@ #define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL #define PRS_REG_GRE_PROTOCOL 0x1f0734UL #define PRS_REG_VXLAN_PORT 0x1f0738UL -#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL #define NIG_REG_ENC_TYPE_ENABLE 0x501058UL #define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0) @@ -595,8 +597,8 @@ #define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL #define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL #define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL -#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL -#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2 0x10092cUL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2 0x100930UL #define NIG_REG_NGE_IP_ENABLE 0x508b28UL #define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL @@ -606,7 +608,10 @@ #define QM_REG_WFQPFWEIGHT 0x2f4e80UL #define QM_REG_WFQVPWEIGHT 0x2fa000UL - +#define QM_REG_WFQVPUPPERBOUND \ + 0x2fb000UL +#define QM_REG_WFQVPCRD \ + 0x2fc000UL #define PGLCS_REG_DBG_SELECT_K2_E5 \ 0x001d14UL #define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \ @@ -1437,29 +1442,29 @@ 0x1401140UL #define XSEM_REG_SYNC_DBG_EMPTY \ 0x1401160UL -#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define XSEM_REG_SLOW_DBG_ACTIVE \ 0x1401400UL -#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define XSEM_REG_SLOW_DBG_MODE \ 0x1401404UL -#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define XSEM_REG_DBG_FRAME_MODE \ 0x1401408UL #define XSEM_REG_DBG_GPRE_VECT \ 0x1401410UL -#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define 
XSEM_REG_DBG_MODE1_CFG \ 0x1401420UL #define XSEM_REG_FAST_MEMORY \ 0x1440000UL #define YSEM_REG_SYNC_DBG_EMPTY \ 0x1501160UL -#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define YSEM_REG_SLOW_DBG_ACTIVE \ 0x1501400UL -#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define YSEM_REG_SLOW_DBG_MODE \ 0x1501404UL -#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define YSEM_REG_DBG_FRAME_MODE \ 0x1501408UL #define YSEM_REG_DBG_GPRE_VECT \ 0x1501410UL -#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define YSEM_REG_DBG_MODE1_CFG \ 0x1501420UL #define YSEM_REG_FAST_MEMORY \ 0x1540000UL @@ -1467,15 +1472,15 @@ 0x1601140UL #define PSEM_REG_SYNC_DBG_EMPTY \ 0x1601160UL -#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define PSEM_REG_SLOW_DBG_ACTIVE \ 0x1601400UL -#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define PSEM_REG_SLOW_DBG_MODE \ 0x1601404UL -#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define PSEM_REG_DBG_FRAME_MODE \ 0x1601408UL #define PSEM_REG_DBG_GPRE_VECT \ 0x1601410UL -#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define PSEM_REG_DBG_MODE1_CFG \ 0x1601420UL #define PSEM_REG_FAST_MEMORY \ 0x1640000UL @@ -1483,15 +1488,15 @@ 0x1701140UL #define TSEM_REG_SYNC_DBG_EMPTY \ 0x1701160UL -#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define TSEM_REG_SLOW_DBG_ACTIVE \ 0x1701400UL -#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define TSEM_REG_SLOW_DBG_MODE \ 0x1701404UL -#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define TSEM_REG_DBG_FRAME_MODE \ 0x1701408UL #define TSEM_REG_DBG_GPRE_VECT \ 0x1701410UL -#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define TSEM_REG_DBG_MODE1_CFG \ 0x1701420UL #define TSEM_REG_FAST_MEMORY \ 0x1740000UL @@ -1499,15 +1504,15 @@ 0x1801140UL #define MSEM_REG_SYNC_DBG_EMPTY \ 0x1801160UL -#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define MSEM_REG_SLOW_DBG_ACTIVE \ 0x1801400UL -#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define MSEM_REG_SLOW_DBG_MODE \ 0x1801404UL -#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define MSEM_REG_DBG_FRAME_MODE \ 0x1801408UL #define MSEM_REG_DBG_GPRE_VECT \ 0x1801410UL -#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define MSEM_REG_DBG_MODE1_CFG \ 0x1801420UL #define MSEM_REG_FAST_MEMORY \ 0x1840000UL @@ -1517,21 +1522,21 @@ 20480 #define USEM_REG_SYNC_DBG_EMPTY \ 0x1901160UL -#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define USEM_REG_SLOW_DBG_ACTIVE \ 0x1901400UL -#define USEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define USEM_REG_SLOW_DBG_MODE \ 0x1901404UL -#define USEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define USEM_REG_DBG_FRAME_MODE \ 0x1901408UL #define USEM_REG_DBG_GPRE_VECT \ 0x1901410UL -#define USEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define USEM_REG_DBG_MODE1_CFG \ 0x1901420UL #define USEM_REG_FAST_MEMORY \ 0x1940000UL #define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \ 0x000748UL -#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \ +#define SEM_FAST_REG_DBG_MODSRC_DISABLE \ 0x00074cUL #define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \ 0x000750UL @@ -1561,7 +1566,7 @@ 0x341500UL #define BRB_REG_BIG_RAM_DATA_SIZE \ 64 -#define SEM_FAST_REG_STALL_0_BB_K2 \ +#define SEM_FAST_REG_STALL_0 \ 0x000488UL #define SEM_FAST_REG_STALLED \ 0x000494UL @@ -1619,35 +1624,35 @@ 0x008c14UL #define NWS_REG_NWS_CMU_K2 \ 0x720000UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \ 0x000680UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \ 0x000684UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \ 0x0006c0UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \ +#define 
PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \ 0x0006c4UL -#define MS_REG_MS_CMU_K2_E5 \ +#define MS_REG_MS_CMU_K2 \ 0x6a4000UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \ 0x000208UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \ 0x00020cUL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \ 0x000210UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \ 0x000214UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \ 0x000208UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \ 0x00020cUL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \ 0x000210UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \ 0x000214UL -#define PHY_PCIE_REG_PHY0_K2_E5 \ +#define PHY_PCIE_REG_PHY0_K2 \ 0x620000UL -#define PHY_PCIE_REG_PHY1_K2_E5 \ +#define PHY_PCIE_REG_PHY1_K2 \ 0x624000UL #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL #define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index cf5baa5e59bc..071b4aeaddf2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -792,7 +792,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, if (rc) goto err; - /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ dma_free_coherent(&p_hwfn->cdev->pdev->dev, qp->orq_num_pages * RDMA_RING_PAGE_SIZE, diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index e27dd9a4547e..7a3bd749e1e4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -6,47 +6,47 @@ #include <linux/types.h> /** - * @brief qed_selftest_memory - Perform memory test + * qed_selftest_memory(): Perform memory test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_memory(struct qed_dev *cdev); /** - * @brief qed_selftest_interrupt - Perform interrupt test + * qed_selftest_interrupt(): Perform interrupt test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_interrupt(struct qed_dev *cdev); /** - * @brief qed_selftest_register - Perform register test + * qed_selftest_register(): Perform register test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_register(struct qed_dev *cdev); /** - * @brief qed_selftest_clock - Perform clock test + * qed_selftest_clock(): Perform clock test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_clock(struct qed_dev *cdev); /** - * @brief qed_selftest_nvram - Perform nvram test + * qed_selftest_nvram(): Perform nvram test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. 
*/ int qed_selftest_nvram(struct qed_dev *cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 60ff3222bf55..4fb02a5579ee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -23,31 +23,26 @@ enum spq_mode { }; struct qed_spq_comp_cb { - void (*function)(struct qed_hwfn *, - void *, - union event_ring_data *, + void (*function)(struct qed_hwfn *p_hwfn, + void *cookie, + union event_ring_data *data, u8 fw_return_code); void *cookie; }; /** - * @brief qed_eth_cqe_completion - handles the completion of a - * ramrod on the cqe ring + * qed_eth_cqe_completion(): handles the completion of a + * ramrod on the cqe ring. * - * @param p_hwfn - * @param cqe + * @p_hwfn: HW device data. + * @cqe: CQE. * - * @return int + * Return: Int. */ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe); -/** - * @file - * - * QED Slow-hwfn queue interface - */ - + /* QED Slow-hwfn queue interface */ union ramrod_data { struct pf_start_ramrod_data pf_start; struct pf_update_ramrod_data pf_update; @@ -58,7 +53,7 @@ union ramrod_data { struct tx_queue_stop_ramrod_data tx_queue_stop; struct vport_start_ramrod_data vport_start; struct vport_stop_ramrod_data vport_stop; - struct rx_update_gft_filter_data rx_update_gft; + struct rx_update_gft_filter_ramrod_data rx_update_gft; struct vport_update_ramrod_data vport_update; struct core_rx_start_ramrod_data core_rx_queue_start; struct core_rx_stop_ramrod_data core_rx_queue_stop; @@ -207,117 +202,128 @@ struct qed_spq { }; /** - * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that - * Pends it to the future list. + * qed_spq_post(): Posts a Slow hwfn request to FW, or lacking that + * Pends it to the future list. * - * @param p_hwfn - * @param p_req + * @p_hwfn: HW device data. + * @p_ent: Ent. + * @fw_return_code: Return code from firmware. * - * @return int + * Return: Int. */ int qed_spq_post(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *fw_return_code); /** - * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ. + * qed_spq_alloc(): Alloocates & initializes the SPQ and EQ. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_spq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_setup - Reset the SPQ to its start state. + * qed_spq_setup(): Reset the SPQ to its start state. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_spq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_deallocate - Deallocates the given SPQ struct. + * qed_spq_free(): Deallocates the given SPQ struct. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_spq_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_get_entry - Obtain an entrry from the spq - * free pool list. - * - * + * qed_spq_get_entry(): Obtain an entrry from the spq + * free pool list. * - * @param p_hwfn - * @param pp_ent + * @p_hwfn: HW device data. + * @pp_ent: PP ENT. * - * @return int + * Return: Int. */ int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent); /** - * @brief qed_spq_return_entry - Return an entry to spq free - * pool list + * qed_spq_return_entry(): Return an entry to spq free pool list. * - * @param p_hwfn - * @param p_ent + * @p_hwfn: HW device data. + * @p_ent: P ENT. + * + * Return: Void. 
*/ void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent); /** - * @brief qed_eq_allocate - Allocates & initializes an EQ struct + * qed_eq_alloc(): Allocates & initializes an EQ struct. * - * @param p_hwfn - * @param num_elem number of elements in the eq + * @p_hwfn: HW device data. + * @num_elem: number of elements in the eq. * - * @return int + * Return: Int. */ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem); /** - * @brief qed_eq_setup - Reset the EQ to its start state. + * qed_eq_setup(): Reset the EQ to its start state. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_eq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_free - deallocates the given EQ struct. + * qed_eq_free(): deallocates the given EQ struct. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_eq_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_prod_update - update the FW with default EQ producer + * qed_eq_prod_update(): update the FW with default EQ producer. + * + * @p_hwfn: HW device data. + * @prod: Prod. * - * @param p_hwfn - * @param prod + * Return: Void. */ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod); /** - * @brief qed_eq_completion - Completes currently pending EQ elements + * qed_eq_completion(): Completes currently pending EQ elements. * - * @param p_hwfn - * @param cookie + * @p_hwfn: HW device data. + * @cookie: Cookie. * - * @return int + * Return: Int. */ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie); /** - * @brief qed_spq_completion - Completes a single event + * qed_spq_completion(): Completes a single event. * - * @param p_hwfn - * @param echo - echo value from cookie (used for determining completion) - * @param p_data - data from cookie (used in callback function if applicable) + * @p_hwfn: HW device data. + * @echo: echo value from cookie (used for determining completion). + * @fw_return_code: FW return code. + * @p_data: data from cookie (used in callback function if applicable). * - * @return int + * Return: Int. */ int qed_spq_completion(struct qed_hwfn *p_hwfn, __le16 echo, @@ -325,44 +331,43 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, union event_ring_data *p_data); /** - * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ + * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u32 - SPQ CID + * Return: u32 - SPQ CID. */ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_alloc - Allocates & initializes an ConsQ - * struct + * qed_consq_alloc(): Allocates & initializes an ConsQ struct. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_consq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_setup - Reset the ConsQ to its start state. + * qed_consq_setup(): Reset the ConsQ to its start state. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return Void. */ void qed_consq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_free - deallocates the given ConsQ struct. + * qed_consq_free(): deallocates the given ConsQ struct. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return Void. */ void qed_consq_free(struct qed_hwfn *p_hwfn); int qed_spq_pend_post(struct qed_hwfn *p_hwfn); -/** - * @file - * - * @brief Slow-hwfn low-level commands (Ramrods) function definitions. - */ +/* Slow-hwfn low-level commands (Ramrods) function definitions. 
*/ #define QED_SP_EQ_COMPLETION 0x01 #define QED_SP_CQE_COMPLETION 0x02 @@ -377,12 +382,15 @@ struct qed_sp_init_data { }; /** - * @brief Returns a SPQ entry to the pool / frees the entry if allocated. - * Should be called on in error flows after initializing the SPQ entry - * and before posting it. + * qed_sp_destroy_request(): Returns a SPQ entry to the pool / frees the + * entry if allocated. Should be called on in error + * flows after initializing the SPQ entry + * and before posting it. + * + * @p_hwfn: HW device data. + * @p_ent: Ent. * - * @param p_hwfn - * @param p_ent + * Return: Void. */ void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent); @@ -394,7 +402,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_sp_init_data *p_data); /** - * @brief qed_sp_pf_start - PF Function Start Ramrod + * qed_sp_pf_start(): PF Function Start Ramrod. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_tunn: P_tunn. + * @allow_npar_tx_switch: Allow NPAR TX Switch. + * + * Return: Int. * * This ramrod is sent to initialize a physical function (PF). It will * configure the function related parameters and write its completion to the @@ -404,12 +419,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * allocated by the driver on host memory and its parameters are written * to the internal RAM of the UStorm by the Function Start Ramrod. * - * @param p_hwfn - * @param p_ptt - * @param p_tunn - * @param allow_npar_tx_switch - * - * @return int */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, @@ -418,47 +427,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, bool allow_npar_tx_switch); /** - * @brief qed_sp_pf_update - PF Function Update Ramrod + * qed_sp_pf_update(): PF Function Update Ramrod. * - * This ramrod updates function-related parameters. Every parameter can be - * updated independently, according to configuration flags. + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Int. * - * @return int + * This ramrod updates function-related parameters. Every parameter can be + * updated independently, according to configuration flags. */ int qed_sp_pf_update(struct qed_hwfn *p_hwfn); /** - * @brief qed_sp_pf_update_stag - Update firmware of new outer tag + * qed_sp_pf_update_stag(): Update firmware of new outer tag. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); /** - * @brief qed_sp_pf_stop - PF Function Stop Ramrod - * - * This ramrod is sent to close a Physical Function (PF). It is the last ramrod - * sent and the last completion written to the PFs Event Ring. This ramrod also - * deletes the context for the Slowhwfn connection on this PF. - * - * @note Not required for first packet. - * - * @param p_hwfn - * - * @return int - */ - -/** - * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod + * qed_sp_pf_update_ufp(): PF ufp update Ramrod. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn); @@ -470,11 +465,11 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod + * qed_sp_heartbeat_ramrod(): Send empty Ramrod. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. 
*/ int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index b4ed54ffef9b..648176dfb871 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -369,8 +369,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain)); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); p_ramrod->event_ring_num_pages = page_cnt; - DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, + + /* Place consolidation queue address in ramrod */ + DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr, qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain)); + page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain); + p_ramrod->consolid_q_num_pages = page_cnt; qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); @@ -401,8 +405,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; - p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf; - p_ramrod->num_vfs = (u8) p_iov->total_vfs; + p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf; + p_ramrod->num_vfs = (u8)p_iov->total_vfs; } p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 0bc1a0aeb56e..e0473729b161 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -20,6 +20,7 @@ #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_iscsi.h" @@ -31,8 +32,8 @@ #include "qed_rdma.h" /*************************************************************************** -* Structures & Definitions -***************************************************************************/ + * Structures & Definitions + ***************************************************************************/ #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) @@ -42,8 +43,8 @@ #define SPQ_BLOCK_SLEEP_MS (5) /*************************************************************************** -* Blocking Imp. (BLOCK/EBLOCK mode) -***************************************************************************/ + * Blocking Imp. 
(BLOCK/EBLOCK mode) + ***************************************************************************/ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, void *cookie, union event_ring_data *data, u8 fw_return_code) @@ -149,8 +150,8 @@ err: } /*************************************************************************** -* SPQ entries inner API -***************************************************************************/ + * SPQ entries inner API + ***************************************************************************/ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { @@ -184,12 +185,12 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* HSI access -***************************************************************************/ + * HSI access + ***************************************************************************/ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq) { - struct e4_core_conn_context *p_cxt; + struct core_conn_context *p_cxt; struct qed_cxt_info cxt_info; u16 physical_q; int rc; @@ -207,23 +208,20 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->xstorm_ag_context.flags10, - E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); + XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags1, - E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); + XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags9, - E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); + XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); /* QM physical queue */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q); - p_cxt->xstorm_st_context.spq_base_lo = + p_cxt->xstorm_st_context.spq_base_addr.lo = DMA_LO_LE(p_spq->chain.p_phys_addr); - p_cxt->xstorm_st_context.spq_base_hi = + p_cxt->xstorm_st_context.spq_base_addr.hi = DMA_HI_LE(p_spq->chain.p_phys_addr); - - DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr, - p_hwfn->p_consq->chain.p_phys_addr); } static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, @@ -265,8 +263,8 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* Asynchronous events -***************************************************************************/ + * Asynchronous events + ***************************************************************************/ static int qed_async_event_completion(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) @@ -311,12 +309,12 @@ qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* EQ API -***************************************************************************/ + * EQ API + ***************************************************************************/ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) { - u32 addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id); + u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_EQE_CONS, p_hwfn->rel_pf_id); REG_WR16(p_hwfn, addr, prod); } @@ -433,8 +431,8 @@ void qed_eq_free(struct qed_hwfn *p_hwfn) } /*************************************************************************** -* CQE API - manipulate EQ functionality -***************************************************************************/ + * CQE 
API - manipulate EQ functionality + ***************************************************************************/ static int qed_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe, enum protocol_type protocol) @@ -464,8 +462,8 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* Slow hwfn Queue (spq) -***************************************************************************/ + * Slow hwfn Queue (spq) + ***************************************************************************/ void qed_spq_setup(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; @@ -548,7 +546,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) int ret; /* SPQ struct */ - p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL); + p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL); if (!p_spq) return -ENOMEM; @@ -676,7 +674,6 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq = p_hwfn->p_spq; if (p_ent->queue == &p_spq->unlimited_pending) { - if (list_empty(&p_spq->free_pool)) { list_add_tail(&p_ent->list, &p_spq->unlimited_pending); p_spq->unlimited_pending_count++; @@ -725,8 +722,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* Accessor -***************************************************************************/ + * Accessor + ***************************************************************************/ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) { if (!p_hwfn->p_spq) @@ -735,8 +732,8 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) } /*************************************************************************** -* Posting new Ramrods -***************************************************************************/ + * Posting new Ramrods + ***************************************************************************/ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, struct list_head *head, u32 keep_reserve) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index ed2b6fe5a78d..8ac38828ba45 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -11,6 +11,7 @@ #include <linux/qed/qed_iov_if.h> #include "qed_cxt.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" @@ -19,12 +20,13 @@ #include "qed_sp.h" #include "qed_sriov.h" #include "qed_vf.h" -static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, - u8 opcode, - __le16 echo, - union event_ring_data *data, u8 fw_return_code); static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); +static u16 qed_vf_from_entity_id(__le16 entity_id) +{ + return le16_to_cpu(entity_id) - MAX_NUM_PFS; +} + static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) { u8 legacy = 0; @@ -169,8 +171,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, b_enabled_only, false)) vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; else - DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", - relative_vf_id); + DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n", + __func__, relative_vf_id); return vf; } @@ -308,7 +310,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, struct qed_dmae_params params; struct qed_vf_info *p_vf; - p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if 
(!p_vf) return -EINVAL; @@ -420,7 +422,7 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) bulletin_p = p_iov_info->bulletins_phys; if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { DP_ERR(p_hwfn, - "qed_iov_setup_vfdb called without allocating mem first\n"); + "%s called without allocating mem first\n", __func__); return; } @@ -464,7 +466,7 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); + "%s for %d VFs\n", __func__, num_vfs); /* Allocate PF Mailbox buffer (per-VF) */ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; @@ -500,10 +502,10 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) QED_MSG_IOV, "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", p_iov_info->mbx_msg_virt_addr, - (u64) p_iov_info->mbx_msg_phys_addr, + (u64)p_iov_info->mbx_msg_phys_addr, p_iov_info->mbx_reply_virt_addr, - (u64) p_iov_info->mbx_reply_phys_addr, - p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); + (u64)p_iov_info->mbx_reply_phys_addr, + p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys); return 0; } @@ -608,7 +610,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) if (rc) return rc; - /* We want PF IOV to be synonemous with the existance of p_iov_info; + /* We want PF IOV to be synonemous with the existence of p_iov_info; * In case the capability is published but there are no VFs, simply * de-allocate the struct. */ @@ -714,12 +716,12 @@ static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, int i; /* Set VF masks and configuration - pretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); /* unpretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); /* iterate over all queues, clear sb consumer */ for (i = 0; i < vf->num_sbs; i++) @@ -734,7 +736,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, { u32 igu_vf_conf; - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); @@ -746,7 +748,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); /* unpretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); } static int @@ -807,7 +809,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, if (rc) return rc; - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); @@ -816,7 +818,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.hw_mode); /* unpretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); vf->state = VF_FREE; @@ -904,7 +906,7 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, p_block->igu_sb_id * sizeof(u64), 2, NULL); } - vf->num_sbs = (u8) num_rx_queues; + vf->num_sbs = 
(u8)num_rx_queues; return vf->num_sbs; } @@ -988,7 +990,7 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); if (!vf) { - DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); + DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); return -EINVAL; } @@ -1092,7 +1094,7 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); if (!vf) { - DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); + DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); return -EINVAL; } @@ -1220,8 +1222,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn, * channel would be re-set to ready prior to that. */ REG_WR(p_hwfn, - GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1); qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, mbx->req_virt->first_tlv.reply_address, @@ -1545,7 +1547,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, memset(resp, 0, sizeof(*resp)); /* Write the PF version so that VF would know which version - * is supported - might be later overriden. This guarantees that + * is supported - might be later overridden. This guarantees that * VF could recognize legacy PF based on lack of versions in reply. */ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; @@ -1603,7 +1605,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, /* fill in pfdev info */ pfdev_info->chip_num = p_hwfn->cdev->chip_num; pfdev_info->db_size = 0; - pfdev_info->indices_per_sb = PIS_PER_SB_E4; + pfdev_info->indices_per_sb = PIS_PER_SB; pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; @@ -1897,7 +1899,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, int sb_id; int rc; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Failed to get VF info, invalid vfid [%d]\n", @@ -1957,7 +1959,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); if (rc) { DP_ERR(p_hwfn, - "qed_iov_vf_mbx_start_vport returned error %d\n", rc); + "%s returned error %d\n", __func__, rc); status = PFVF_STATUS_FAILURE; } else { vf->vport_instance++; @@ -1993,8 +1995,8 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); if (rc) { - DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", - rc); + DP_ERR(p_hwfn, "%s returned error %d\n", + __func__, rc); status = PFVF_STATUS_FAILURE; } @@ -2138,10 +2140,10 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, * calculate on their own and clean the producer prior to this. 
*/ if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)) - REG_WR(p_hwfn, - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), - 0); + qed_wr(p_hwfn, p_ptt, MSEM_REG_FAST_MEMORY + + SEM_FAST_REG_INT_RAM + + MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, + req->rx_qid), 0); rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid, req->bd_max_bytes, @@ -3030,7 +3032,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, goto out; } p_rss_params = vzalloc(sizeof(*p_rss_params)); - if (p_rss_params == NULL) { + if (!p_rss_params) { status = PFVF_STATUS_FAILURE; goto out; } @@ -3550,6 +3552,7 @@ out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, sizeof(struct pfvf_def_resp_tlv), status); } + static int qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) @@ -3557,7 +3560,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, int cnt; u32 val; - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid); for (cnt = 0; cnt < 50; cnt++) { val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); @@ -3565,7 +3568,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, break; msleep(20); } - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); if (cnt == 50) { DP_ERR(p_hwfn, @@ -3577,48 +3580,73 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, return 0; } +#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS) + static int qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) { - u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4]; - int i, cnt; + u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp; + u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port; + u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; + u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0; + u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0; + u8 port_id, tc, tc_id = 0, voq = 0; + int cnt; - /* Read initial consumers & producers */ - for (i = 0; i < MAX_NUM_VOQS_E4; i++) { - u32 prod; + memset(cons, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); + memset(distance, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); - cons[i] = qed_rd(p_hwfn, p_ptt, - PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + - i * 0x40); - prod = qed_rd(p_hwfn, p_ptt, - PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + - i * 0x40); - distance[i] = prod - cons[i]; + /* Read initial consumers & producers */ + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ + for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) { + tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC; + voq = VOQ(port_id, tc_id, max_phys_tcs_per_port); + cons[voq] = qed_rd(p_hwfn, p_ptt, + cons_voq0_addr + voq * 0x40); + prod = qed_rd(p_hwfn, p_ptt, + prod_voq0_addr + voq * 0x40); + distance[voq] = prod - cons[voq]; + } } /* Wait for consumers to pass the producers */ - i = 0; + port_id = 0; + tc = 0; for (cnt = 0; cnt < 50; cnt++) { - for (; i < MAX_NUM_VOQS_E4; i++) { - u32 tmp; + for (; port_id < max_ports_per_engine; port_id++) { + /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ + for (; tc < max_phys_tcs_per_port + 1; tc++) { + tc_id = (tc < max_phys_tcs_per_port) ? 
+ tc : PURE_LB_TC; + voq = VOQ(port_id, + tc_id, max_phys_tcs_per_port); + tmp = qed_rd(p_hwfn, p_ptt, + cons_voq0_addr + voq * 0x40); + if (distance[voq] > tmp - cons[voq]) + break; + } - tmp = qed_rd(p_hwfn, p_ptt, - PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + - i * 0x40); - if (distance[i] > tmp - cons[i]) + if (tc == max_phys_tcs_per_port + 1) + tc = 0; + else break; } - if (i == MAX_NUM_VOQS_E4) + if (port_id == max_ports_per_engine) break; msleep(20); } if (cnt == 50) { - DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", - p_vf->abs_vf_id, i); + DP_ERR(p_hwfn, "VF[%d]: pbf poll failed on VOQ%d\n", + p_vf->abs_vf_id, (int)voq); + + DP_ERR(p_hwfn, "VOQ %d has port_id as %d and tc_id as %d]\n", + (int)voq, (int)port_id, (int)tc_id); + return -EBUSY; } @@ -3680,8 +3708,8 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, * doesn't do that as a part of FLR. */ REG_WR(p_hwfn, - GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_VF_PF_CHANNEL_READY, vfid), 1); /* VF_STOPPED has to be set only after final cleanup * but prior to re-enabling the VF. @@ -3842,7 +3870,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, struct qed_iov_vf_mbx *mbx; struct qed_vf_info *p_vf; - p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf) return; @@ -3979,7 +4007,7 @@ static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, u16 abs_vfid) { - u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; + u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { DP_VERBOSE(p_hwfn, @@ -3989,7 +4017,7 @@ static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, return NULL; } - return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; + return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; } static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, @@ -4013,13 +4041,13 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, return 0; } -static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, - struct malicious_vf_eqe_data *p_data) +void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data) { struct qed_vf_info *p_vf; - p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); - + p_vf = qed_sriov_get_vf_from_absid(p_hwfn, qed_vf_from_entity_id + (p_data->entity_id)); if (!p_vf) return; @@ -4036,16 +4064,13 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, } } -static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, - union event_ring_data *data, u8 fw_return_code) +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, + union event_ring_data *data, u8 fw_return_code) { switch (opcode) { case COMMON_EVENT_VF_PF_CHANNEL: return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), &data->vf_pf_channel.msg_addr); - case COMMON_EVENT_MALICIOUS_VF: - qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); - return 0; default: DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", opcode); @@ -4075,7 +4100,7 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, struct qed_dmae_params params; struct qed_vf_info *vf_info; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) 
return -EINVAL; @@ -4176,7 +4201,7 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf_info; u64 feature; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Can not set forced MAC, invalid vfid [%d]\n", vfid); @@ -4226,7 +4251,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *p_vf_info; - p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf_info) return false; @@ -4237,7 +4262,7 @@ static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *p_vf_info; - p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf_info) return true; @@ -4248,7 +4273,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *vf_info; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) return false; @@ -4266,7 +4291,7 @@ static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) goto out; } - vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf) goto out; @@ -4345,7 +4370,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, return rc; rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ - return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val); + return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val, + QM_RL_TYPE_NORMAL); } static int @@ -4376,7 +4402,7 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) struct qed_wfq_data *vf_vp_wfq; struct qed_vf_info *vf_info; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) return 0; @@ -4395,8 +4421,10 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) */ void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) { + /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(flag, &hwfn->iov_task_flags); + /* Memory barrier after setting atomic bit */ smp_mb__after_atomic(); DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); @@ -4407,8 +4435,8 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev) int i; for_each_hwfn(cdev, i) - queue_delayed_work(cdev->hwfns[i].iov_wq, - &cdev->hwfns[i].iov_task, 0); + queue_delayed_work(cdev->hwfns[i].iov_wq, + &cdev->hwfns[i].iov_task, 0); } int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) @@ -4416,8 +4444,8 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) int i, j; for_each_hwfn(cdev, i) - if (cdev->hwfns[i].iov_wq) - flush_workqueue(cdev->hwfns[i].iov_wq); + if (cdev->hwfns[i].iov_wq) + flush_workqueue(cdev->hwfns[i].iov_wq); /* Mark VFs for disablement */ qed_iov_set_vfs_to_disable(cdev, true); @@ -5010,7 +5038,7 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) } qed_for_each_vf(hwfn, i) - qed_iov_post_vf_bulletin(hwfn, i, ptt); + qed_iov_post_vf_bulletin(hwfn, i, ptt); qed_ptt_release(hwfn, ptt); } @@ -5196,7 +5224,6 @@ void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); } - 
flush_workqueue(cdev->hwfns[i].iov_wq); destroy_workqueue(cdev->hwfns[i].iov_wq); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index eacd6457f195..f448e3dd6c8b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -142,7 +142,7 @@ struct qed_vf_queue { enum vf_state { VF_FREE = 0, /* VF ready to be acquired holds no resc */ - VF_ACQUIRED, /* VF, acquired, but not initalized */ + VF_ACQUIRED, /* VF, acquired, but not initialized */ VF_ENABLED, /* VF, Enabled */ VF_RESET, /* VF, FLR'd, pending cleanup */ VF_STOPPED /* VF, Stopped */ @@ -250,29 +250,31 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass; #ifdef CONFIG_QED_SRIOV /** - * @brief Check if given VF ID @vfid is valid - * w.r.t. @b_enabled_only value - * if b_enabled_only = true - only enabled VF id is valid - * else any VF id less than max_vfs is valid + * qed_iov_is_valid_vfid(): Check if given VF ID @vfid is valid + * w.r.t. @b_enabled_only value + * if b_enabled_only = true - only enabled + * VF id is valid. + * else any VF id less than max_vfs is valid. * - * @param p_hwfn - * @param rel_vf_id - Relative VF ID - * @param b_enabled_only - consider only enabled VF - * @param b_non_malicious - true iff we want to validate vf isn't malicious. + * @p_hwfn: HW device data. + * @rel_vf_id: Relative VF ID. + * @b_enabled_only: consider only enabled VF. + * @b_non_malicious: true iff we want to validate vf isn't malicious. * - * @return bool - true for valid VF ID + * Return: bool - true for valid VF ID */ bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, int rel_vf_id, bool b_enabled_only, bool b_non_malicious); /** - * @brief - Given a VF index, return index of next [including that] active VF. + * qed_iov_get_next_active_vf(): Given a VF index, return index of + * next [including that] active VF. * - * @param p_hwfn - * @param rel_vf_id + * @p_hwfn: HW device data. + * @rel_vf_id: VF ID. * - * @return MAX_NUM_VFS in case no further active VFs, otherwise index. + * Return: MAX_NUM_VFS in case no further active VFs, otherwise index. */ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); @@ -280,83 +282,117 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid, u16 vxlan_port, u16 geneve_port); /** - * @brief Read sriov related information and allocated resources - * reads from configuration space, shmem, etc. + * qed_iov_hw_info(): Read sriov related information and allocated resources + * reads from configuration space, shmem, etc. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_iov_hw_info(struct qed_hwfn *p_hwfn); /** - * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset + * qed_add_tlv(): place a given tlv on the tlv buffer at next offset * - * @param p_hwfn - * @param p_iov - * @param type - * @param length + * @p_hwfn: HW device data. + * @offset: offset. + * @type: Type + * @length: Length. * - * @return pointer to the newly placed tlv + * Return: pointer to the newly placed tlv */ void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length); /** - * @brief list the types and lengths of the tlvs on the buffer + * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer * - * @param p_hwfn - * @param tlvs_list + * @p_hwfn: HW device data. + * @tlvs_list: Tlvs_list. + * + * Return: Void. 
*/ void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list); /** - * @brief qed_iov_alloc - allocate sriov related resources + * qed_sriov_vfpf_malicious(): Handle malicious VF/PF. + * + * @p_hwfn: HW device data. + * @p_data: Pointer to data. + * + * Return: Void. + */ +void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data); + +/** + * qed_sriov_eqe_event(): Callback for SRIOV events. + * + * @p_hwfn: HW device data. + * @opcode: Opcode. + * @echo: Echo. + * @data: data + * @fw_return_code: FW return code. + * + * Return: Int. + */ +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, + union event_ring_data *data, u8 fw_return_code); + +/** + * qed_iov_alloc(): allocate sriov related resources * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_iov_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_iov_setup - setup sriov related resources + * qed_iov_setup(): setup sriov related resources * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_iov_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_iov_free - free sriov related resources + * qed_iov_free(): free sriov related resources + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_iov_free(struct qed_hwfn *p_hwfn); /** - * @brief free sriov related memory that was allocated during hw_prepare + * qed_iov_free_hw_info(): free sriov related memory that was + * allocated during hw_prepare + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_iov_free_hw_info(struct qed_dev *cdev); /** - * @brief Mark structs of vfs that have been FLR-ed. + * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed. * - * @param p_hwfn - * @param disabled_vfs - bitmask of all VFs on path that were FLRed + * @p_hwfn: HW device data. + * @disabled_vfs: bitmask of all VFs on path that were FLRed * - * @return true iff one of the PF's vfs got FLRed. false otherwise. + * Return: true iff one of the PF's vfs got FLRed. false otherwise. */ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); /** - * @brief Search extended TLVs in request/reply buffer. + * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer. * - * @param p_hwfn - * @param p_tlvs_list - Pointer to tlvs list - * @param req_type - Type of TLV + * @p_hwfn: HW device data. + * @p_tlvs_list: Pointer to tlvs list + * @req_type: Type of TLV * - * @return pointer to tlv type if found, otherwise returns NULL. + * Return: pointer to tlv type if found, otherwise returns NULL. 
*/ void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type); @@ -442,6 +478,18 @@ static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn) { } + +static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data) +{ +} + +static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, + __le16 echo, union event_ring_data *data, + u8 fw_return_code) +{ + return 0; +} #endif #define qed_for_each_vf(_p_hwfn, _i) \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 72a38d53d33f..597cd9cd57b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -27,7 +27,7 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length) "preparing to send 0x%04x tlv over vf pf channel\n", type); - /* Reset Requst offset */ + /* Reset Request offset */ p_iov->offset = (u8 *)p_iov->vf2pf_request; /* Clear mailbox - both request and reply */ @@ -444,7 +444,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) u32 reg; int rc; - /* Set number of hwfns - might be overriden once leading hwfn learns + /* Set number of hwfns - might be overridden once leading hwfn learns * actual configuration from PF. */ if (IS_LEAD_HWFN(p_hwfn)) @@ -504,7 +504,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) QED_MSG_IOV, "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n", p_iov->vf2pf_request, - (u64) p_iov->vf2pf_request_phys, + (u64)p_iov->vf2pf_request_phys, p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys); /* Allocate Bulletin board */ @@ -561,6 +561,7 @@ free_p_iov: return -ENOMEM; } + #define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) @@ -1285,8 +1286,8 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); - req->opcode = (u8) p_ucast->opcode; - req->type = (u8) p_ucast->type; + req->opcode = (u8)p_ucast->opcode; + req->type = (u8)p_ucast->type; memcpy(req->mac, p_ucast->mac, ETH_ALEN); req->vlan = p_ucast->vlan; @@ -1372,7 +1373,7 @@ exit: int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, - u8 *p_mac) + const u8 *p_mac) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_bulletin_update_mac_tlv *p_req; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 60d2bb64e65f..306b5f4bc632 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -48,7 +48,7 @@ struct channel_tlv { u16 length; }; -/* header of first vf->pf tlv carries the offset used to calculate reponse +/* header of first vf->pf tlv carries the offset used to calculate response * buffer address */ struct vfpf_first_tlv { @@ -85,8 +85,8 @@ struct vfpf_acquire_tlv { struct vfpf_first_tlv first_tlv; struct vf_pf_vfdev_info { -#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */ -#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ +#define VFPF_ACQUIRE_CAP_PRE_FP_HSI BIT(0) /* VF pre-FP hsi version */ +#define VFPF_ACQUIRE_CAP_100G BIT(1) /* VF can support 100g */ /* A requirement for supporting multi-Tx queues on a single queue-zone, * VF would pass qids as additional information whenever passing queue * 
references. @@ -688,13 +688,16 @@ struct qed_vf_iov { }; /** - * @brief VF - Set Rx/Tx coalesce per VF's relative queue. - * Coalesce value '0' will omit the configuration. + * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue. + * Coalesce value '0' will omit the + * configuration. * - * @param p_hwfn - * @param rx_coal - coalesce value in micro second for rx queue - * @param tx_coal - coalesce value in micro second for tx queue - * @param p_cid - queue cid + * @p_hwfn: HW device data. + * @rx_coal: coalesce value in micro second for rx queue. + * @tx_coal: coalesce value in micro second for tx queue. + * @p_cid: queue cid. + * + * Return: Int. * **/ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, @@ -702,148 +705,172 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 tx_coal, struct qed_queue_cid *p_cid); /** - * @brief VF - Get coalesce per VF's relative queue. + * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue. * - * @param p_hwfn - * @param p_coal - coalesce value in micro second for VF queues. - * @param p_cid - queue cid + * @p_hwfn: HW device data. + * @p_coal: coalesce value in micro second for VF queues. + * @p_cid: queue cid. * + * Return: Int. **/ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, struct qed_queue_cid *p_cid); #ifdef CONFIG_QED_SRIOV /** - * @brief Read the VF bulletin and act on it if needed + * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed. * - * @param p_hwfn - * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise. + * @p_hwfn: HW device data. + * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise. * - * @return enum _qed_status + * Return: enum _qed_status. */ int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change); /** - * @brief Get link paramters for VF from qed + * qed_vf_get_link_params(): Get link parameters for VF from qed + * + * @p_hwfn: HW device data. + * @params: the link params structure to be filled for the VF. * - * @param p_hwfn - * @param params - the link params structure to be filled for the VF + * Return: Void. */ void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *params); /** - * @brief Get link state for VF from qed + * qed_vf_get_link_state(): Get link state for VF from qed. + * + * @p_hwfn: HW device data. + * @link: the link state structure to be filled for the VF * - * @param p_hwfn - * @param link - the link state structure to be filled for the VF + * Return: Void. */ void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *link); /** - * @brief Get link capabilities for VF from qed + * qed_vf_get_link_caps(): Get link capabilities for VF from qed. * - * @param p_hwfn - * @param p_link_caps - the link capabilities structure to be filled for the VF + * @p_hwfn: HW device data. + * @p_link_caps: the link capabilities structure to be filled for the VF + * + * Return: Void. */ void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps); /** - * @brief Get number of Rx queues allocated for VF by qed + * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed + * + * @p_hwfn: HW device data. + * @num_rxqs: allocated RX queues * - * @param p_hwfn - * @param num_rxqs - allocated RX queues + * Return: Void. 
*/ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); /** - * @brief Get number of Rx queues allocated for VF by qed + * qed_vf_get_num_txqs(): Get number of Rx queues allocated for VF by qed * - * @param p_hwfn - * @param num_txqs - allocated RX queues + * @p_hwfn: HW device data. + * @num_txqs: allocated RX queues + * + * Return: Void. */ void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs); /** - * @brief Get number of available connections [both Rx and Tx] for VF + * qed_vf_get_num_cids(): Get number of available connections + * [both Rx and Tx] for VF + * + * @p_hwfn: HW device data. + * @num_cids: allocated number of connections * - * @param p_hwfn - * @param num_cids - allocated number of connections + * Return: Void. */ void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids); /** - * @brief Get port mac address for VF + * qed_vf_get_port_mac(): Get port mac address for VF. * - * @param p_hwfn - * @param port_mac - destination location for port mac + * @p_hwfn: HW device data. + * @port_mac: destination location for port mac + * + * Return: Void. */ void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac); /** - * @brief Get number of VLAN filters allocated for VF by qed + * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated + * for VF by qed. + * + * @p_hwfn: HW device data. + * @num_vlan_filters: allocated VLAN filters * - * @param p_hwfn - * @param num_rxqs - allocated VLAN filters + * Return: Void. */ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters); /** - * @brief Get number of MAC filters allocated for VF by qed + * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated + * for VF by qed * - * @param p_hwfn - * @param num_rxqs - allocated MAC filters + * @p_hwfn: HW device data. + * @num_mac_filters: allocated MAC filters + * + * Return: Void. */ void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters); /** - * @brief Check if VF can set a MAC address + * qed_vf_check_mac(): Check if VF can set a MAC address * - * @param p_hwfn - * @param mac + * @p_hwfn: HW device data. + * @mac: Mac. * - * @return bool + * Return: bool. */ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac); /** - * @brief Set firmware version information in dev_info from VFs acquire response tlv + * qed_vf_get_fw_version(): Set firmware version information + * in dev_info from VFs acquire response tlv + * + * @p_hwfn: HW device data. + * @fw_major: FW major. + * @fw_minor: FW minor. + * @fw_rev: FW rev. + * @fw_eng: FW eng. * - * @param p_hwfn - * @param fw_major - * @param fw_minor - * @param fw_rev - * @param fw_eng + * Return: Void. */ void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng); /** - * @brief hw preparation for VF - * sends ACQUIRE message + * qed_vf_hw_prepare(): hw preparation for VF sends ACQUIRE message * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. 
*/ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); /** - * @brief VF - start the RX Queue by sending a message to the PF - * @param p_hwfn - * @param p_cid - Only relative fields are relevant - * @param bd_max_bytes - maximum number of bytes per bd - * @param bd_chain_phys_addr - physical address of bd chain - * @param cqe_pbl_addr - physical address of pbl - * @param cqe_pbl_size - pbl size - * @param pp_prod - pointer to the producer to be - * used in fastpath + * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF + * + * @p_hwfn: HW device data. + * @p_cid: Only relative fields are relevant + * @bd_max_bytes: maximum number of bytes per bd + * @bd_chain_phys_addr: physical address of bd chain + * @cqe_pbl_addr: physical address of pbl + * @cqe_pbl_size: pbl size + * @pp_prod: pointer to the producer to be used in fastpath * - * @return int + * Return: Int. */ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, @@ -853,18 +880,16 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, u16 cqe_pbl_size, void __iomem **pp_prod); /** - * @brief VF - start the TX queue by sending a message to the - * PF. + * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the + * PF. * - * @param p_hwfn - * @param tx_queue_id - zero based within the VF - * @param sb - status block for this queue - * @param sb_index - index within the status block - * @param bd_chain_phys_addr - physical address of tx chain - * @param pp_doorbell - pointer to address to which to - * write the doorbell too.. + * @p_hwfn: HW device data. + * @p_cid: CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL Size. + * @pp_doorbell: pointer to address to which to write the doorbell too. * - * @return int + * Return: Int. */ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, @@ -873,90 +898,91 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, u16 pbl_size, void __iomem **pp_doorbell); /** - * @brief VF - stop the RX queue by sending a message to the PF + * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF. * - * @param p_hwfn - * @param p_cid - * @param cqe_completion + * @p_hwfn: HW device data. + * @p_cid: CID. + * @cqe_completion: CQE Completion. * - * @return int + * Return: Int. */ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, bool cqe_completion); /** - * @brief VF - stop the TX queue by sending a message to the PF + * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF. * - * @param p_hwfn - * @param tx_qid + * @p_hwfn: HW device data. + * @p_cid: CID. * - * @return int + * Return: Int. */ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid); /** - * @brief VF - send a vport update command + * qed_vf_pf_vport_update(): VF - send a vport update command. * - * @param p_hwfn - * @param params + * @p_hwfn: HW device data. + * @p_params: Params * - * @return int + * Return: Int. */ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_params); /** + * qed_vf_pf_reset(): VF - send a close message to PF. * - * @brief VF - send a close message to PF + * @p_hwfn: HW device data. * - * @param p_hwfn - * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn); /** - * @brief VF - free vf`s memories + * qed_vf_pf_release(): VF - free vf`s memories. * - * @param p_hwfn + * @p_hwfn: HW device data. 
* - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_release(struct qed_hwfn *p_hwfn); /** - * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given + * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given * sb_id. For VFs igu sbs don't have to be contiguous * - * @param p_hwfn - * @param sb_id + * @p_hwfn: HW device data. + * @sb_id: SB ID. * - * @return INLINE u16 + * Return: INLINE u16 */ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); /** - * @brief Stores [or removes] a configured sb_info. + * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info. + * + * @p_hwfn: HW device data. + * @sb_id: zero-based SB index [for fastpath] + * @p_sb: may be NULL [during removal]. * - * @param p_hwfn - * @param sb_id - zero-based SB index [for fastpath] - * @param sb_info - may be NULL [during removal]. + * Return: Void. */ void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id, struct qed_sb_info *p_sb); /** - * @brief qed_vf_pf_vport_start - perform vport start for VF. + * qed_vf_pf_vport_start(): perform vport start for VF. * - * @param p_hwfn - * @param vport_id - * @param mtu - * @param inner_vlan_removal - * @param tpa_mode - * @param max_buffers_per_cqe, - * @param only_untagged - default behavior regarding vlan acceptance + * @p_hwfn: HW device data. + * @vport_id: Vport ID. + * @mtu: MTU. + * @inner_vlan_removal: Innter VLAN removal. + * @tpa_mode: TPA mode + * @max_buffers_per_cqe: Max buffer pre CQE. + * @only_untagged: default behavior regarding vlan acceptance * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 vport_id, @@ -966,11 +992,11 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 max_buffers_per_cqe, u8 only_untagged); /** - * @brief qed_vf_pf_vport_stop - stop the VF's vport + * qed_vf_pf_vport_stop(): stop the VF's vport * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn); @@ -981,42 +1007,49 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, struct qed_filter_mcast *p_filter_cmd); /** - * @brief qed_vf_pf_int_cleanup - clean the SB of the VF + * qed_vf_pf_int_cleanup(): clean the SB of the VF * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn); /** - * @brief - return the link params in a given bulletin board + * __qed_vf_get_link_params(): return the link params in a given bulletin board * - * @param p_hwfn - * @param p_params - pointer to a struct to fill with link params - * @param p_bulletin + * @p_hwfn: HW device data. + * @p_params: pointer to a struct to fill with link params + * @p_bulletin: Bulletin. + * + * Return: Void. */ void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *p_params, struct qed_bulletin_content *p_bulletin); /** - * @brief - return the link state in a given bulletin board + * __qed_vf_get_link_state(): return the link state in a given bulletin board + * + * @p_hwfn: HW device data. + * @p_link: pointer to a struct to fill with link state + * @p_bulletin: Bulletin. * - * @param p_hwfn - * @param p_link - pointer to a struct to fill with link state - * @param p_bulletin + * Return: Void. 
*/ void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *p_link, struct qed_bulletin_content *p_bulletin); /** - * @brief - return the link capabilities in a given bulletin board + * __qed_vf_get_link_caps(): return the link capabilities in a given + * bulletin board * - * @param p_hwfn - * @param p_link - pointer to a struct to fill with link capabilities - * @param p_bulletin + * @p_hwfn: HW device data. + * @p_link_caps: pointer to a struct to fill with link capabilities + * @p_bulletin: Bulletin. + * + * Return: Void. */ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps, @@ -1029,11 +1062,15 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id); /** - * @brief - Ask PF to update the MAC address in it's bulletin board + * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in + * it's bulletin board + * + * @p_hwfn: HW device data. + * @p_mac: mac address to be updated in bulletin board * - * @param p_mac - mac address to be updated in bulletin board + * Return: Int. */ -int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac); +int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, const u8 *p_mac); #else static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, @@ -1222,7 +1259,7 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, } static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, - u8 *p_mac) + const u8 *p_mac) { return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index a2e4dfb5cb44..3010833ddde3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -557,7 +557,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced) return; } - ether_addr_copy(edev->ndev->dev_addr, mac); + eth_hw_addr_set(edev->ndev, mac); __qede_unlock(edev); } @@ -617,32 +617,30 @@ void qede_fill_rss_params(struct qede_dev *edev, static int qede_set_ucast_rx_mac(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, - unsigned char mac[ETH_ALEN]) + const unsigned char mac[ETH_ALEN]) { - struct qed_filter_params filter_cmd; + struct qed_filter_ucast_params ucast; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.mac_valid = 1; - ether_addr_copy(filter_cmd.filter.ucast.mac, mac); + memset(&ucast, 0, sizeof(ucast)); + ucast.type = opcode; + ucast.mac_valid = 1; + ether_addr_copy(ucast.mac, mac); - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_ucast(edev->cdev, &ucast); } static int qede_set_ucast_rx_vlan(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, u16 vid) { - struct qed_filter_params filter_cmd; + struct qed_filter_ucast_params ucast; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.vlan_valid = 1; - filter_cmd.filter.ucast.vlan = vid; + memset(&ucast, 0, sizeof(ucast)); + ucast.type = opcode; + ucast.vlan_valid = 1; + ucast.vlan = vid; - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_ucast(edev->cdev, &ucast); } static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action) @@ -1057,18 +1055,17 @@ static 
int qede_set_mcast_rx_mac(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, unsigned char *mac, int num_macs) { - struct qed_filter_params filter_cmd; + struct qed_filter_mcast_params mcast; int i; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_MCAST; - filter_cmd.filter.mcast.type = opcode; - filter_cmd.filter.mcast.num = num_macs; + memset(&mcast, 0, sizeof(mcast)); + mcast.type = opcode; + mcast.num = num_macs; for (i = 0; i < num_macs; i++, mac += ETH_ALEN) - ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); + ether_addr_copy(mcast.mac[i], mac); - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_mcast(edev->cdev, &mcast); } int qede_set_mac_addr(struct net_device *ndev, void *p) @@ -1104,7 +1101,7 @@ int qede_set_mac_addr(struct net_device *ndev, void *p) goto out; } - ether_addr_copy(ndev->dev_addr, addr->sa_data); + eth_hw_addr_set(ndev, addr->sa_data); DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data); if (edev->state != QEDE_STATE_OPEN) { @@ -1194,7 +1191,6 @@ void qede_config_rx_mode(struct net_device *ndev) { enum qed_filter_rx_mode_type accept_flags; struct qede_dev *edev = netdev_priv(ndev); - struct qed_filter_params rx_mode; unsigned char *uc_macs, *temp; struct netdev_hw_addr *ha; int rc, uc_count; @@ -1220,10 +1216,6 @@ void qede_config_rx_mode(struct net_device *ndev) netif_addr_unlock_bh(ndev); - /* Configure the struct for the Rx mode */ - memset(&rx_mode, 0, sizeof(struct qed_filter_params)); - rx_mode.type = QED_FILTER_TYPE_RX_MODE; - /* Remove all previous unicast secondary macs and multicast macs * (configure / leave the primary mac) */ @@ -1271,8 +1263,7 @@ void qede_config_rx_mode(struct net_device *ndev) qede_config_accept_any_vlan(edev, false); } - rx_mode.filter.accept_flags = accept_flags; - edev->ops->filter_config(edev->cdev, &rx_mode); + edev->ops->filter_config_rx_mode(edev->cdev, accept_flags); out: kfree(uc_macs); } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 9837bdb89cd4..06c6a5813606 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -836,7 +836,7 @@ static void qede_init_ndev(struct qede_dev *edev) ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE; /* Set network device HW mac */ - ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac); + eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac); ndev->mtu = edev->dev_info.common.mtu; } @@ -1176,19 +1176,17 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, edev->devlink = qed_ops->common->devlink_register(cdev); if (IS_ERR(edev->devlink)) { DP_NOTICE(edev, "Cannot register devlink\n"); + rc = PTR_ERR(edev->devlink); edev->devlink = NULL; - /* Go on, we can live without devlink */ + goto err3; } } else { struct net_device *ndev = pci_get_drvdata(pdev); + struct qed_devlink *qdl; edev = netdev_priv(ndev); - - if (edev->devlink) { - struct qed_devlink *qdl = devlink_priv(edev->devlink); - - qdl->cdev = cdev; - } + qdl = devlink_priv(edev->devlink); + qdl->cdev = cdev; edev->cdev = cdev; memset(&edev->stats, 0, sizeof(edev->stats)); memcpy(&edev->dev_info, &dev_info, sizeof(dev_info)); @@ -1397,7 +1395,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, static int qede_alloc_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block_e4 *sb_virt; + struct status_block 
*sb_virt; dma_addr_t sb_phys; int rc; @@ -2802,10 +2800,13 @@ static void qede_get_eth_tlv_data(void *dev, void *data) } /** - * qede_io_error_detected - called when PCI error is detected + * qede_io_error_detected(): Called when PCI error is detected + * * @pdev: Pointer to PCI device * @state: The current pci connection state * + *Return: pci_ers_result_t. + * * This function is called after a PCI bus error affecting * this device has been detected. */ diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index c00ad57575ea..1e6d72adfe43 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -508,10 +508,12 @@ static void eeprom_readword(struct ql3_adapter *qdev, static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) { - __le16 *p = (__le16 *)ndev->dev_addr; - p[0] = cpu_to_le16(addr[0]); - p[1] = cpu_to_le16(addr[1]); - p[2] = cpu_to_le16(addr[2]); + __le16 buf[ETH_ALEN / 2]; + + buf[0] = cpu_to_le16(addr[0]); + buf[1] = cpu_to_le16(addr[1]); + buf[2] = cpu_to_le16(addr[2]); + eth_hw_addr_set(ndev, (u8 *)buf); } static int ql_get_nvram_params(struct ql3_adapter *qdev) @@ -3564,7 +3566,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); spin_lock_irqsave(&qdev->hw_lock, hw_flags); /* Program lower 32 bits of the MAC address */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 75960a29f80e..ed84f0f97623 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -304,7 +304,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) if (ret) return ret; - memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); + eth_hw_addr_set(netdev, mac_addr); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); /* set station address */ @@ -356,7 +356,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) qlcnic_delete_adapter_mac(adapter); memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); qlcnic_set_multi(adapter->netdev); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 87b8c032195d..06104d2ff5b3 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -420,7 +420,7 @@ static void emac_mac_dma_config(struct emac_adapter *adpt) } /* set MAC address */ -static void emac_set_mac_address(struct emac_adapter *adpt, u8 *addr) +static void emac_set_mac_address(struct emac_adapter *adpt, const u8 *addr) { u32 sta; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 9015a38eaced..a55c52696d49 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -545,13 +545,10 @@ static int emac_probe_resources(struct platform_device *pdev, struct emac_adapter *adpt) { struct net_device *netdev = adpt->netdev; - char maddr[ETH_ALEN]; int ret = 0; /* get mac address */ - if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN)) - ether_addr_copy(netdev->dev_addr, maddr); - else + if (device_get_ethdev_address(&pdev->dev, netdev)) 
eth_hw_addr_random(netdev); /* Core 0 interrupt */ diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 8427fe1b8fd1..955cce644392 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -968,7 +968,7 @@ qca_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, qcaspi_devs); - ret = of_get_mac_address(spi->dev.of_node, qca->net_dev->dev_addr); + ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev); if (ret) { eth_hw_addr_random(qca->net_dev); dev_info(&spi->dev, "Using random MAC address: %pM\n", diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c index ce3f7ce31adc..27c4f43176aa 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -347,7 +347,7 @@ static int qca_uart_probe(struct serdev_device *serdev) of_property_read_u32(serdev->dev.of_node, "current-speed", &speed); - ret = of_get_mac_address(serdev->dev.of_node, qca->net_dev->dev_addr); + ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev); if (ret) { eth_hw_addr_random(qca->net_dev); dev_info(&serdev->dev, "Using random MAC address: %pM\n", diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 13d8eb43a485..1b2119b1d48a 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -224,7 +224,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->netdev_ops = &rmnet_vnd_ops; rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; - eth_random_addr(rmnet_dev->dev_addr); + eth_hw_addr_random(rmnet_dev); rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; /* Raw IP mode */ diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 01ef5efd7bc2..a6bf7d505178 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -453,7 +453,7 @@ static void r6040_down(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - u16 *adrp; + const u16 *adrp; /* Stop MAC */ iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */ @@ -462,7 +462,7 @@ static void r6040_down(struct net_device *dev) r6040_reset_mac(lp); /* Restore MAC Address to MIDx */ - adrp = (u16 *) dev->dev_addr; + adrp = (const u16 *) dev->dev_addr; iowrite16(adrp[0], ioaddr + MID_0L); iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[2], ioaddr + MID_0H); @@ -731,13 +731,13 @@ static void r6040_mac_address(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - u16 *adrp; + const u16 *adrp; /* Reset MAC */ r6040_reset_mac(lp); /* Restore MAC Address */ - adrp = (u16 *) dev->dev_addr; + adrp = (const u16 *) dev->dev_addr; iowrite16(adrp[0], ioaddr + MID_0L); iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[2], ioaddr + MID_0H); @@ -849,13 +849,13 @@ static void r6040_multicast_list(struct net_device *dev) unsigned long flags; struct netdev_hw_addr *ha; int i; - u16 *adrp; + const u16 *adrp; u16 hash_table[4] = { 0 }; spin_lock_irqsave(&lp->lock, flags); /* Keep our MAC Address */ - adrp = (u16 *)dev->dev_addr; + adrp = (const u16 *)dev->dev_addr; iowrite16(adrp[0], ioaddr + MID_0L); iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[2], ioaddr + MID_0H); @@ -1031,8 +1031,8 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) void 
__iomem *ioaddr; int err, io_size = R6040_IO_SIZE; static int card_idx = -1; + u16 addr[ETH_ALEN / 2]; int bar = 0; - u16 *adrp; pr_info("%s\n", version); @@ -1102,14 +1102,14 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Set MAC address */ card_idx++; - adrp = (u16 *)dev->dev_addr; - adrp[0] = ioread16(ioaddr + MID_0L); - adrp[1] = ioread16(ioaddr + MID_0M); - adrp[2] = ioread16(ioaddr + MID_0H); + addr[0] = ioread16(ioaddr + MID_0L); + addr[1] = ioread16(ioaddr + MID_0M); + addr[2] = ioread16(ioaddr + MID_0H); + eth_hw_addr_set(dev, (u8 *)addr); /* Some bootloader/BIOSes do not initialize * MAC address, warn about that */ - if (!(adrp[0] || adrp[1] || adrp[2])) { + if (!(addr[0] || addr[1] || addr[2])) { netdev_warn(dev, "MAC address not initialized, " "generating random\n"); eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 2b84b4565e64..4f39f843bb3a 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1624,7 +1624,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); spin_lock_irq(&cp->lock); @@ -1889,6 +1889,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *regs; resource_size_t pciaddr; unsigned int addr_len, i, pci_using_dac; + __le16 addr[ETH_ALEN / 2]; pr_info_once("%s", version); @@ -1979,8 +1980,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) /* read MAC address from EEPROM */ addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6; for (i = 0; i < 3; i++) - ((__le16 *) (dev->dev_addr))[i] = - cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); + addr[i] = cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); + eth_hw_addr_set(dev, (u8 *)addr); dev->netdev_ops = &cp_netdev_ops; netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 2e6923cc653e..15b40fd93cd2 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -945,6 +945,7 @@ static int rtl8139_init_one(struct pci_dev *pdev, { struct net_device *dev = NULL; struct rtl8139_private *tp; + __le16 addr[ETH_ALEN / 2]; int i, addr_len, option; void __iomem *ioaddr; static int board_idx = -1; @@ -994,8 +995,8 @@ static int rtl8139_init_one(struct pci_dev *pdev, addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6; for (i = 0; i < 3; i++) - ((__le16 *) (dev->dev_addr))[i] = - cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len)); + addr[i] = cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len)); + eth_hw_addr_set(dev, (u8 *)addr); /* The Rtl8139-specific entries in the device structure. 
*/ dev->netdev_ops = &rtl8139_netdev_ops; @@ -2238,7 +2239,7 @@ static int rtl8139_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); spin_lock_irq(&tp->lock); diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index b6c849b258a0..6cbcb3164367 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -368,6 +368,7 @@ static int __init atp_probe1(long ioaddr) static void __init get_node_ID(struct net_device *dev) { long ioaddr = dev->base_addr; + __be16 addr[ETH_ALEN / 2]; int sa_offset = 0; int i; @@ -379,8 +380,9 @@ static void __init get_node_ID(struct net_device *dev) sa_offset = 15; for (i = 0; i < 3; i++) - ((__be16 *)dev->dev_addr)[i] = + addr[i] = cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i))); + eth_hw_addr_set(dev, (u8 *)addr); write_reg(ioaddr, CMR2, CMR2_NULL); } diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index 2728df46ec41..8da4b66b71b5 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -37,7 +37,7 @@ enum mac_version { RTL_GIGA_MAC_VER_24, RTL_GIGA_MAC_VER_25, RTL_GIGA_MAC_VER_26, - RTL_GIGA_MAC_VER_27, + /* support for RTL_GIGA_MAC_VER_27 has been removed */ RTL_GIGA_MAC_VER_28, RTL_GIGA_MAC_VER_29, RTL_GIGA_MAC_VER_30, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 46a6ff9a782d..0199914440ab 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -118,7 +118,6 @@ static const struct { [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, - [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, @@ -985,33 +984,6 @@ DECLARE_RTL_COND(rtl_ocpar_cond) return RTL_R32(tp, OCPAR) & OCPAR_FLAG; } -static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) -{ - RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); - RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); - RTL_W32(tp, EPHY_RXER_NUM, 0); - - rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); -} - -static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) -{ - r8168dp_1_mdio_access(tp, reg, - OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); -} - -static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) -{ - r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); - - mdelay(1); - RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); - RTL_W32(tp, EPHY_RXER_NUM, 0); - - return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? 
- RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT; -} - #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 static void r8168dp_2_mdio_start(struct rtl8169_private *tp) @@ -1053,9 +1025,6 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) static void rtl_writephy(struct rtl8169_private *tp, int location, int val) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - r8168dp_1_mdio_write(tp, location, val); - break; case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); @@ -1072,8 +1041,6 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val) static int rtl_readphy(struct rtl8169_private *tp, int location) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - return r8168dp_1_mdio_read(tp, location); case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); @@ -1235,7 +1202,6 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp) static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; @@ -2040,8 +2006,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) /* 8168DP family. */ /* It seems this early RTL8168dp version never made it to - * the wild. Let's see whether somebody complains, if not - * we'll remove support for this chip version completely. + * the wild. Support has been removed. * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, */ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, @@ -2371,7 +2336,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp) r8168c_hw_jumbo_disable(tp); } break; - case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_28: if (jumbo) r8168dp_hw_jumbo_enable(tp); else @@ -3719,7 +3684,6 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, - [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, @@ -3982,7 +3946,6 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) goto no_reset; switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 50f0f621b1aa..f7ad5487879b 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -548,64 +548,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); } -static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp, - struct phy_device *phydev) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x10, 0x0008 }, - { 0x0d, 0x006c }, - - { 0x1f, 0x0000 }, - { 0x0d, 0xf880 }, - - { 0x1f, 0x0001 }, - { 0x17, 0x0cc0 }, - - { 0x1f, 0x0001 }, - { 0x0b, 0xa4d8 }, - { 0x09, 0x281c }, - { 0x07, 0x2883 }, - { 0x0a, 0x6b35 }, - { 0x1d, 0x3da4 }, - { 0x1c, 0xeffd }, - { 0x14, 0x7f52 }, - { 0x18, 0x7fc6 }, - { 0x08, 0x0601 }, - { 0x06, 0x4063 }, - { 0x10, 0xf074 }, - { 0x1f, 0x0003 }, - { 0x13, 0x0789 }, - { 0x12, 0xf4bd }, - { 0x1a, 0x04fd }, - { 0x14, 0x84b0 }, - 
{ 0x1f, 0x0000 }, - { 0x00, 0x9200 }, - - { 0x1f, 0x0005 }, - { 0x01, 0x0340 }, - { 0x1f, 0x0001 }, - { 0x04, 0x4000 }, - { 0x03, 0x1d21 }, - { 0x02, 0x0c32 }, - { 0x01, 0x0200 }, - { 0x00, 0x5554 }, - { 0x04, 0x4800 }, - { 0x04, 0x4000 }, - { 0x04, 0xf000 }, - { 0x03, 0xdf01 }, - { 0x02, 0xdf20 }, - { 0x01, 0x101a }, - { 0x00, 0xa0ff }, - { 0x04, 0xf800 }, - { 0x04, 0xf000 }, - { 0x1f, 0x0000 }, - }; - - rtl_writephy_batch(phydev, phy_reg_init); - r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000); -} - static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1332,7 +1274,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, - [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 47c5377e4f42..08062d73df10 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -81,6 +81,7 @@ enum ravb_reg { RQC3 = 0x00A0, RQC4 = 0x00A4, RPC = 0x00B0, + RTC = 0x00B4, /* R-Car Gen3 and RZ/G2L only */ UFCW = 0x00BC, UFCS = 0x00C0, UFCV0 = 0x00C4, @@ -187,19 +188,23 @@ enum ravb_reg { PIR = 0x0520, PSR = 0x0528, PIPR = 0x052c, + CXR31 = 0x0530, /* RZ/G2L only */ MPR = 0x0558, PFTCR = 0x055c, PFRCR = 0x0560, GECMR = 0x05b0, MAHR = 0x05c0, MALR = 0x05c8, - TROCR = 0x0700, /* R-Car Gen3 only */ + TROCR = 0x0700, /* R-Car Gen3 and RZ/G2L only */ + CXR41 = 0x0708, /* RZ/G2L only */ + CXR42 = 0x0710, /* RZ/G2L only */ CEFCR = 0x0740, FRECR = 0x0748, TSFRCR = 0x0750, TLFRCR = 0x0758, RFCR = 0x0760, MAFCR = 0x0778, + CSR0 = 0x0800, /* RZ/G2L only */ }; @@ -810,10 +815,11 @@ enum ECMR_BIT { ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */ ECMR_RXF = 0x00020000, ECMR_PFR = 0x00040000, - ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 only */ + ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 and RZ/G2L */ ECMR_RZPF = 0x00100000, ECMR_DPAD = 0x00200000, ECMR_RCSC = 0x00800000, + ECMR_RCPT = 0x02000000, /* Documented for RZ/G2L only */ ECMR_TRCCM = 0x04000000, }; @@ -823,6 +829,7 @@ enum ECSR_BIT { ECSR_MPD = 0x00000002, ECSR_LCHNG = 0x00000004, ECSR_PHYI = 0x00000008, + ECSR_PFRI = 0x00000010, /* Documented for R-Car Gen3 and RZ/G2L */ }; /* ECSIPR */ @@ -857,9 +864,13 @@ enum MPR_BIT { /* GECMR */ enum GECMR_BIT { - GECMR_SPEED = 0x00000001, - GECMR_SPEED_100 = 0x00000000, - GECMR_SPEED_1000 = 0x00000001, + GECMR_SPEED = 0x00000001, + GECMR_SPEED_100 = 0x00000000, + GECMR_SPEED_1000 = 0x00000001, + GBETH_GECMR_SPEED = 0x00000030, + GBETH_GECMR_SPEED_10 = 0x00000000, + GBETH_GECMR_SPEED_100 = 0x00000010, + GBETH_GECMR_SPEED_1000 = 0x00000020, }; /* The Ethernet AVB descriptor definitions. 
*/ @@ -949,6 +960,16 @@ enum RAVB_QUEUE { RAVB_NC, /* Network Control Queue */ }; +enum CXR31_BIT { + CXR31_SEL_LINK0 = 0x00000001, + CXR31_SEL_LINK1 = 0x00000008, +}; + +enum CSR0_BIT { + CSR0_TPE = 0x00000010, + CSR0_RPE = 0x00000020, +}; + #define DBAT_ENTRY_NUM 22 #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 @@ -956,6 +977,9 @@ enum RAVB_QUEUE { #define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16)) +#define GBETH_RX_BUFF_MAX 8192 +#define GBETH_RX_DESC_DATA_SIZE 4080 + struct ravb_tstamp_skb { struct list_head list; struct sk_buff *skb; @@ -985,8 +1009,8 @@ struct ravb_hw_info { void *(*alloc_rx_desc)(struct net_device *ndev, int q); bool (*receive)(struct net_device *ndev, int *quota, int q); void (*set_rate)(struct net_device *ndev); - int (*set_rx_csum_feature)(struct net_device *ndev, netdev_features_t features); - void (*dmac_init)(struct net_device *ndev); + int (*set_feature)(struct net_device *ndev, netdev_features_t features); + int (*dmac_init)(struct net_device *ndev); void (*emac_init)(struct net_device *ndev); const char (*gstrings_stats)[ETH_GSTRING_LEN]; size_t gstrings_size; @@ -994,14 +1018,20 @@ struct ravb_hw_info { netdev_features_t net_features; int stats_len; size_t max_rx_len; + u32 tccr_mask; + u32 rx_max_buf_size; unsigned aligned_tx: 1; /* hardware features */ unsigned internal_delay:1; /* AVB-DMAC has internal delays */ unsigned tx_counters:1; /* E-MAC has TX counters */ + unsigned carrier_counters:1; /* E-MAC has carrier counters */ unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */ - unsigned no_ptp_cfg_active:1; /* AVB-DMAC does not support gPTP active in config mode */ - unsigned ptp_cfg_active:1; /* AVB-DMAC has gPTP support active in config mode */ + unsigned gptp:1; /* AVB-DMAC has gPTP support */ + unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */ + unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */ + unsigned magic_pkt:1; /* E-MAC supports magic packet detection */ + unsigned half_duplex:1; /* E-MAC supports half duplex mode */ }; struct ravb_private { @@ -1018,9 +1048,11 @@ struct ravb_private { struct ravb_desc *desc_bat; dma_addr_t rx_desc_dma[NUM_RX_QUEUE]; dma_addr_t tx_desc_dma[NUM_TX_QUEUE]; + struct ravb_rx_desc *gbeth_rx_ring; struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE]; struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE]; void *tx_align[NUM_TX_QUEUE]; + struct sk_buff *rx_1st_skb; struct sk_buff **rx_skb[NUM_RX_QUEUE]; struct sk_buff **tx_skb[NUM_TX_QUEUE]; u32 rx_over_errors; @@ -1056,6 +1088,8 @@ struct ravb_private { unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */ unsigned int num_tx_desc; /* TX descriptors per packet */ + int duplex; + const struct ravb_hw_info *info; struct reset_control *rstc; }; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 0f85f2d97b18..e5243cc87a19 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -83,7 +83,24 @@ static int ravb_config(struct net_device *ndev) return error; } -static void ravb_set_rate(struct net_device *ndev) +static void ravb_set_rate_gbeth(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + switch (priv->speed) { + case 10: /* 10BASE */ + ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR); + break; + case 100: /* 100BASE */ + ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR); + break; + case 1000: /* 1000BASE */ + ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR); + break; + } +} + +static void 
ravb_set_rate_rcar(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); @@ -115,7 +132,7 @@ static void ravb_read_mac_address(struct device_node *np, { int ret; - ret = of_get_mac_address(np, ndev->dev_addr); + ret = of_get_ethdev_address(np, ndev); if (ret) { u32 mahr = ravb_read(ndev, MAHR); u32 malr = ravb_read(ndev, MALR); @@ -217,7 +234,32 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) return free_num; } -static void ravb_rx_ring_free(struct net_device *ndev, int q) +static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned int ring_size; + unsigned int i; + + if (!priv->gbeth_rx_ring) + return; + + for (i = 0; i < priv->num_rx_ring[q]; i++) { + struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i]; + + if (!dma_mapping_error(ndev->dev.parent, + le32_to_cpu(desc->dptr))) + dma_unmap_single(ndev->dev.parent, + le32_to_cpu(desc->dptr), + GBETH_RX_BUFF_MAX, + DMA_FROM_DEVICE); + } + ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1); + dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring, + priv->rx_desc_dma[q]); + priv->gbeth_rx_ring = NULL; +} + +static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); unsigned int ring_size; @@ -283,7 +325,38 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_skb[q] = NULL; } -static void ravb_rx_ring_format(struct net_device *ndev, int q) +static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct ravb_rx_desc *rx_desc; + unsigned int rx_ring_size; + dma_addr_t dma_addr; + unsigned int i; + + rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; + memset(priv->gbeth_rx_ring, 0, rx_ring_size); + /* Build RX ring buffer */ + for (i = 0; i < priv->num_rx_ring[q]; i++) { + /* RX descriptor */ + rx_desc = &priv->gbeth_rx_ring[i]; + rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE); + dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, + GBETH_RX_BUFF_MAX, + DMA_FROM_DEVICE); + /* We just set the data size to 0 for a failed mapping which + * should prevent DMA from happening... 
+ */ + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + rx_desc->ds_cc = cpu_to_le16(0); + rx_desc->dptr = cpu_to_le32(dma_addr); + rx_desc->die_dt = DT_FEMPTY; + } + rx_desc = &priv->gbeth_rx_ring[i]; + rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); + rx_desc->die_dt = DT_LINKFIX; /* type */ +} + +static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); struct ravb_ex_rx_desc *rx_desc; @@ -356,7 +429,20 @@ static void ravb_ring_format(struct net_device *ndev, int q) desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); } -static void *ravb_alloc_rx_desc(struct net_device *ndev, int q) +static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned int ring_size; + + ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1); + + priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size, + &priv->rx_desc_dma[q], + GFP_KERNEL); + return priv->gbeth_rx_ring; +} + +static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); unsigned int ring_size; @@ -426,7 +512,37 @@ error: return -ENOMEM; } -static void ravb_rcar_emac_init(struct net_device *ndev) +static void ravb_emac_init_gbeth(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + /* Receive frame limit set register */ + ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR); + + /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */ + ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) | + ECMR_TE | ECMR_RE | ECMR_RCPT | + ECMR_TXF | ECMR_RXF, ECMR); + + ravb_set_rate_gbeth(ndev); + + /* Set MAC address */ + ravb_write(ndev, + (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | + (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); + ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); + + /* E-MAC status register clear */ + ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR); + ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0); + + /* E-MAC interrupt enable register */ + ravb_write(ndev, ECSIPR_ICDIP, ECSIPR); + + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0); +} + +static void ravb_emac_init_rcar(struct net_device *ndev) { /* Receive frame limit set register */ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); @@ -436,7 +552,7 @@ static void ravb_rcar_emac_init(struct net_device *ndev) (ndev->features & NETIF_F_RXCSUM ? 
ECMR_RCSC : 0) | ECMR_TE | ECMR_RE, ECMR); - ravb_set_rate(ndev); + ravb_set_rate_rcar(ndev); /* Set MAC address */ ravb_write(ndev, @@ -461,10 +577,58 @@ static void ravb_emac_init(struct net_device *ndev) info->emac_init(ndev); } -static void ravb_rcar_dmac_init(struct net_device *ndev) +static int ravb_dmac_init_gbeth(struct net_device *ndev) +{ + int error; + + error = ravb_ring_init(ndev, RAVB_BE); + if (error) + return error; + + /* Descriptor format */ + ravb_ring_format(ndev, RAVB_BE); + + /* Set DMAC RX */ + ravb_write(ndev, 0x60000000, RCR); + + /* Set Max Frame Length (RTC) */ + ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC); + + /* Set FIFO size */ + ravb_write(ndev, 0x00222200, TGC); + + ravb_write(ndev, 0, TCCR); + + /* Frame receive */ + ravb_write(ndev, RIC0_FRE0, RIC0); + /* Disable FIFO full warning */ + ravb_write(ndev, 0x0, RIC1); + /* Receive FIFO full error, descriptor empty */ + ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2); + + ravb_write(ndev, TIC_FTE0, TIC); + + return 0; +} + +static int ravb_dmac_init_rcar(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + int error; + + error = ravb_ring_init(ndev, RAVB_BE); + if (error) + return error; + error = ravb_ring_init(ndev, RAVB_NC); + if (error) { + ravb_ring_free(ndev, RAVB_BE); + return error; + } + + /* Descriptor format */ + ravb_ring_format(ndev, RAVB_BE); + ravb_ring_format(ndev, RAVB_NC); /* Set AVB RX */ ravb_write(ndev, @@ -491,6 +655,8 @@ static void ravb_rcar_dmac_init(struct net_device *ndev) ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); /* Frame transmitted, timestamp FIFO updated */ ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); + + return 0; } /* Device init function for Ethernet AVB */ @@ -505,20 +671,9 @@ static int ravb_dmac_init(struct net_device *ndev) if (error) return error; - error = ravb_ring_init(ndev, RAVB_BE); + error = info->dmac_init(ndev); if (error) return error; - error = ravb_ring_init(ndev, RAVB_NC); - if (error) { - ravb_ring_free(ndev, RAVB_BE); - return error; - } - - /* Descriptor format */ - ravb_ring_format(ndev, RAVB_BE); - ravb_ring_format(ndev, RAVB_NC); - - info->dmac_init(ndev); /* Setting the control will start the AVB-DMAC process. 
*/ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); @@ -579,7 +734,151 @@ static void ravb_rx_csum(struct sk_buff *skb) skb_trim(skb, skb->len - sizeof(__sum16)); } -static bool ravb_rcar_rx(struct net_device *ndev, int *quota, int q) +static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry, + struct ravb_rx_desc *desc) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct sk_buff *skb; + + skb = priv->rx_skb[RAVB_BE][entry]; + priv->rx_skb[RAVB_BE][entry] = NULL; + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), + ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE); + + return skb; +} + +/* Packet receive function for Gigabit Ethernet */ +static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + struct net_device_stats *stats; + struct ravb_rx_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + u8 desc_status; + int boguscnt; + u16 pkt_len; + u8 die_dt; + int entry; + int limit; + + entry = priv->cur_rx[q] % priv->num_rx_ring[q]; + boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; + stats = &priv->stats[q]; + + boguscnt = min(boguscnt, *quota); + limit = boguscnt; + desc = &priv->gbeth_rx_ring[entry]; + while (desc->die_dt != DT_FEMPTY) { + /* Descriptor type must be checked before all other reads */ + dma_rmb(); + desc_status = desc->msc; + pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; + + if (--boguscnt < 0) + break; + + /* We use 0-byte descriptors to mark the DMA mapping errors */ + if (!pkt_len) + continue; + + if (desc_status & MSC_MC) + stats->multicast++; + + if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) { + stats->rx_errors++; + if (desc_status & MSC_CRC) + stats->rx_crc_errors++; + if (desc_status & MSC_RFE) + stats->rx_frame_errors++; + if (desc_status & (MSC_RTLF | MSC_RTSF)) + stats->rx_length_errors++; + if (desc_status & MSC_CEEF) + stats->rx_missed_errors++; + } else { + die_dt = desc->die_dt & 0xF0; + switch (die_dt) { + case DT_FSINGLE: + skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&priv->napi[q], skb); + stats->rx_packets++; + stats->rx_bytes += pkt_len; + break; + case DT_FSTART: + priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_put(priv->rx_1st_skb, pkt_len); + break; + case DT_FMID: + skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_copy_to_linear_data_offset(priv->rx_1st_skb, + priv->rx_1st_skb->len, + skb->data, + pkt_len); + skb_put(priv->rx_1st_skb, pkt_len); + dev_kfree_skb(skb); + break; + case DT_FEND: + skb = ravb_get_skb_gbeth(ndev, entry, desc); + skb_copy_to_linear_data_offset(priv->rx_1st_skb, + priv->rx_1st_skb->len, + skb->data, + pkt_len); + skb_put(priv->rx_1st_skb, pkt_len); + dev_kfree_skb(skb); + priv->rx_1st_skb->protocol = + eth_type_trans(priv->rx_1st_skb, ndev); + napi_gro_receive(&priv->napi[q], + priv->rx_1st_skb); + stats->rx_packets++; + stats->rx_bytes += priv->rx_1st_skb->len; + break; + } + } + + entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q]; + desc = &priv->gbeth_rx_ring[entry]; + } + + /* Refill the RX ring buffers. 
*/ + for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { + entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; + desc = &priv->gbeth_rx_ring[entry]; + desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE); + + if (!priv->rx_skb[q][entry]) { + skb = netdev_alloc_skb(ndev, info->max_rx_len); + if (!skb) + break; + ravb_set_buffer_align(skb); + dma_addr = dma_map_single(ndev->dev.parent, + skb->data, + GBETH_RX_BUFF_MAX, + DMA_FROM_DEVICE); + skb_checksum_none_assert(skb); + /* We just set the data size to 0 for a failed mapping + * which should prevent DMA from happening... + */ + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + desc->ds_cc = cpu_to_le16(0); + desc->dptr = cpu_to_le32(dma_addr); + priv->rx_skb[q][entry] = skb; + } + /* Descriptor type must be set after all the above writes */ + dma_wmb(); + desc->die_dt = DT_FEMPTY; + } + + *quota -= limit - (++boguscnt); + + return boguscnt <= 0; +} + +/* Packet receive function for Ethernet AVB */ +static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; @@ -717,11 +1016,13 @@ static void ravb_rcv_snd_enable(struct net_device *ndev) /* function for waiting dma process finished */ static int ravb_stop_dma(struct net_device *ndev) { + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int error; /* Wait for stopping the hardware TX process */ - error = ravb_wait(ndev, TCCR, - TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0); + error = ravb_wait(ndev, TCCR, info->tccr_mask, 0); + if (error) return error; @@ -859,6 +1160,7 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; irqreturn_t result = IRQ_NONE; u32 iss; @@ -875,8 +1177,13 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) result = IRQ_HANDLED; /* Network control and best effort queue RX/TX */ - for (q = RAVB_NC; q >= RAVB_BE; q--) { - if (ravb_queue_interrupt(ndev, q)) + if (info->nc_queues) { + for (q = RAVB_NC; q >= RAVB_BE; q--) { + if (ravb_queue_interrupt(ndev, q)) + result = IRQ_HANDLED; + } + } else { + if (ravb_queue_interrupt(ndev, RAVB_BE)) result = IRQ_HANDLED; } } @@ -966,16 +1273,25 @@ static int ravb_poll(struct napi_struct *napi, int budget) struct net_device *ndev = napi->dev; struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + bool gptp = info->gptp || info->ccc_gac; + struct ravb_rx_desc *desc; unsigned long flags; int q = napi - priv->napi; int mask = BIT(q); int quota = budget; + unsigned int entry; + if (!gptp) { + entry = priv->cur_rx[q] % priv->num_rx_ring[q]; + desc = &priv->gbeth_rx_ring[entry]; + } /* Processing RX Descriptor Ring */ /* Clear RX interrupt */ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); - if (ravb_rx(ndev, &quota, q)) - goto out; + if (gptp || desc->die_dt != DT_FEMPTY) { + if (ravb_rx(ndev, &quota, q)) + goto out; + } /* Processing TX Descriptor Ring */ spin_lock_irqsave(&priv->lock, flags); @@ -1000,7 +1316,8 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Receive error message handling */ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; - priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; + if (info->nc_queues) + priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors; if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) @@ -1009,6 +1326,13 @@ out: return budget - quota; } +static void ravb_set_duplex_gbeth(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0); +} + /* PHY state control function */ static void ravb_adjust_link(struct net_device *ndev) { @@ -1025,6 +1349,12 @@ static void ravb_adjust_link(struct net_device *ndev) ravb_rcv_snd_disable(ndev); if (phydev->link) { + if (info->half_duplex && phydev->duplex != priv->duplex) { + new_state = true; + priv->duplex = phydev->duplex; + ravb_set_duplex_gbeth(ndev); + } + if (phydev->speed != priv->speed) { new_state = true; priv->speed = phydev->speed; @@ -1039,6 +1369,8 @@ static void ravb_adjust_link(struct net_device *ndev) new_state = true; priv->link = 0; priv->speed = 0; + if (info->half_duplex) + priv->duplex = -1; } /* Enable TX and RX right over here, if E-MAC change is ignored */ @@ -1061,6 +1393,7 @@ static int ravb_phy_init(struct net_device *ndev) { struct device_node *np = ndev->dev.parent->of_node; struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct phy_device *phydev; struct device_node *pn; phy_interface_t iface; @@ -1068,6 +1401,7 @@ static int ravb_phy_init(struct net_device *ndev) priv->link = 0; priv->speed = 0; + priv->duplex = -1; /* Try connecting to PHY */ pn = of_parse_phandle(np, "phy-handle", 0); @@ -1106,15 +1440,17 @@ static int ravb_phy_init(struct net_device *ndev) netdev_info(ndev, "limited PHY to 100Mbit/s\n"); } - /* 10BASE, Pause and Asym Pause is not supported */ - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); + if (!info->half_duplex) { + /* 10BASE, Pause and Asym Pause is not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); - /* Half Duplex is not supported */ - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + /* Half Duplex is not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + } phy_attached_info(phydev); @@ -1157,6 +1493,24 @@ static void ravb_set_msglevel(struct net_device *ndev, u32 value) priv->msg_enable = value; } +static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = { + "rx_queue_0_current", + "tx_queue_0_current", + "rx_queue_0_dirty", + "tx_queue_0_dirty", + "rx_queue_0_packets", + "tx_queue_0_packets", + "rx_queue_0_bytes", + "tx_queue_0_bytes", + "rx_queue_0_mcast_packets", + "rx_queue_0_errors", + "rx_queue_0_crc_errors", + "rx_queue_0_frame_errors", + "rx_queue_0_length_errors", + "rx_queue_0_csum_offload_errors", + "rx_queue_0_over_errors", +}; + static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { "rx_queue_0_current", "tx_queue_0_current", @@ -1208,11 +1562,14 @@ static void ravb_get_ethtool_stats(struct net_device *ndev, struct ethtool_stats *estats, u64 *data) { struct ravb_private *priv = 
netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int num_rx_q; int i = 0; int q; + num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1; /* Device-specific stats */ - for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) { + for (q = RAVB_BE; q < num_rx_q; q++) { struct net_device_stats *stats = &priv->stats[q]; data[i++] = priv->cur_rx[q]; @@ -1274,7 +1631,7 @@ static int ravb_set_ringparam(struct net_device *ndev, if (netif_running(ndev)) { netif_device_detach(ndev); /* Stop PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ error = ravb_stop_dma(ndev); @@ -1287,7 +1644,8 @@ static int ravb_set_ringparam(struct net_device *ndev, /* Free all the skb's in the RX queue and the DMA buffers. */ ravb_ring_free(ndev, RAVB_BE); - ravb_ring_free(ndev, RAVB_NC); + if (info->nc_queues) + ravb_ring_free(ndev, RAVB_NC); } /* Set new parameters */ @@ -1306,7 +1664,7 @@ static int ravb_set_ringparam(struct net_device *ndev, ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_init(ndev, priv->pdev); netif_device_attach(ndev); @@ -1319,6 +1677,7 @@ static int ravb_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *hw_info = priv->info; info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | @@ -1332,7 +1691,8 @@ static int ravb_get_ts_info(struct net_device *ndev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | (1 << HWTSTAMP_FILTER_ALL); - info->phc_index = ptp_clock_index(priv->ptp.clock); + if (hw_info->gptp || hw_info->ccc_gac) + info->phc_index = ptp_clock_index(priv->ptp.clock); return 0; } @@ -1348,8 +1708,9 @@ static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; - if (wol->wolopts & ~WAKE_MAGIC) + if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC)) return -EOPNOTSUPP; priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); @@ -1403,7 +1764,8 @@ static int ravb_open(struct net_device *ndev) int error; napi_enable(&priv->napi[RAVB_BE]); - napi_enable(&priv->napi[RAVB_NC]); + if (info->nc_queues) + napi_enable(&priv->napi[RAVB_NC]); if (!info->multi_irqs) { error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, @@ -1446,7 +1808,7 @@ static int ravb_open(struct net_device *ndev) ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); @@ -1460,7 +1822,7 @@ static int ravb_open(struct net_device *ndev) out_ptp_stop: /* Stop PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_stop(ndev); out_free_irq_nc_tx: if (!info->multi_irqs) @@ -1477,7 +1839,8 @@ out_free_irq_emac: out_free_irq: free_irq(ndev->irq, ndev); out_napi_off: - napi_disable(&priv->napi[RAVB_NC]); + if (info->nc_queues) + napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); return error; } @@ -1508,7 +1871,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) netif_tx_stop_all_queues(ndev); /* Stop PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ @@ -1526,7 +1889,8 @@ static void ravb_tx_timeout_work(struct work_struct *work) } ravb_ring_free(ndev, RAVB_BE); - 
ravb_ring_free(ndev, RAVB_NC); + if (info->nc_queues) + ravb_ring_free(ndev, RAVB_NC); /* Device init */ error = ravb_dmac_init(ndev); @@ -1543,7 +1907,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) out: /* Initialise PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); @@ -1553,6 +1917,7 @@ out: static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; unsigned int num_tx_desc = priv->num_tx_desc; u16 q = skb_get_queue_mapping(skb); struct ravb_tstamp_skb *ts_skb; @@ -1629,28 +1994,30 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) desc->dptr = cpu_to_le32(dma_addr); /* TX timestamp required */ - if (q == RAVB_NC) { - ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); - if (!ts_skb) { - if (num_tx_desc > 1) { - desc--; - dma_unmap_single(ndev->dev.parent, dma_addr, - len, DMA_TO_DEVICE); + if (info->gptp || info->ccc_gac) { + if (q == RAVB_NC) { + ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); + if (!ts_skb) { + if (num_tx_desc > 1) { + desc--; + dma_unmap_single(ndev->dev.parent, dma_addr, + len, DMA_TO_DEVICE); + } + goto unmap; } - goto unmap; + ts_skb->skb = skb_get(skb); + ts_skb->tag = priv->ts_skb_tag++; + priv->ts_skb_tag &= 0x3ff; + list_add_tail(&ts_skb->list, &priv->ts_skb_list); + + /* TAG and timestamp required flag */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; + desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); } - ts_skb->skb = skb_get(skb); - ts_skb->tag = priv->ts_skb_tag++; - priv->ts_skb_tag &= 0x3ff; - list_add_tail(&ts_skb->list, &priv->ts_skb_list); - /* TAG and timestamp required flag */ - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; - desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); + skb_tx_timestamp(skb); } - - skb_tx_timestamp(skb); /* Descriptor type must be set after all the above writes */ dma_wmb(); if (num_tx_desc > 1) { @@ -1698,28 +2065,45 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev) nstats = &ndev->stats; stats0 = &priv->stats[RAVB_BE]; - stats1 = &priv->stats[RAVB_NC]; if (info->tx_counters) { nstats->tx_dropped += ravb_read(ndev, TROCR); ravb_write(ndev, 0, TROCR); /* (write clear) */ } - nstats->rx_packets = stats0->rx_packets + stats1->rx_packets; - nstats->tx_packets = stats0->tx_packets + stats1->tx_packets; - nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes; - nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes; - nstats->multicast = stats0->multicast + stats1->multicast; - nstats->rx_errors = stats0->rx_errors + stats1->rx_errors; - nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors; - nstats->rx_frame_errors = - stats0->rx_frame_errors + stats1->rx_frame_errors; - nstats->rx_length_errors = - stats0->rx_length_errors + stats1->rx_length_errors; - nstats->rx_missed_errors = - stats0->rx_missed_errors + stats1->rx_missed_errors; - nstats->rx_over_errors = - stats0->rx_over_errors + stats1->rx_over_errors; + if (info->carrier_counters) { + nstats->collisions += ravb_read(ndev, CXR41); + ravb_write(ndev, 0, CXR41); /* (write clear) */ + nstats->tx_carrier_errors += ravb_read(ndev, CXR42); + ravb_write(ndev, 0, CXR42); /* (write clear) */ + } + + nstats->rx_packets = stats0->rx_packets; + nstats->tx_packets = stats0->tx_packets; + 
nstats->rx_bytes = stats0->rx_bytes; + nstats->tx_bytes = stats0->tx_bytes; + nstats->multicast = stats0->multicast; + nstats->rx_errors = stats0->rx_errors; + nstats->rx_crc_errors = stats0->rx_crc_errors; + nstats->rx_frame_errors = stats0->rx_frame_errors; + nstats->rx_length_errors = stats0->rx_length_errors; + nstats->rx_missed_errors = stats0->rx_missed_errors; + nstats->rx_over_errors = stats0->rx_over_errors; + if (info->nc_queues) { + stats1 = &priv->stats[RAVB_NC]; + + nstats->rx_packets += stats1->rx_packets; + nstats->tx_packets += stats1->tx_packets; + nstats->rx_bytes += stats1->rx_bytes; + nstats->tx_bytes += stats1->tx_bytes; + nstats->multicast += stats1->multicast; + nstats->rx_errors += stats1->rx_errors; + nstats->rx_crc_errors += stats1->rx_crc_errors; + nstats->rx_frame_errors += stats1->rx_frame_errors; + nstats->rx_length_errors += stats1->rx_length_errors; + nstats->rx_missed_errors += stats1->rx_missed_errors; + nstats->rx_over_errors += stats1->rx_over_errors; + } return nstats; } @@ -1752,7 +2136,7 @@ static int ravb_close(struct net_device *ndev) ravb_write(ndev, 0, TIC); /* Stop PTP Clock driver */ - if (info->no_ptp_cfg_active) + if (info->gptp) ravb_ptp_stop(ndev); /* Set the config mode to stop the AVB-DMAC's processes */ @@ -1761,10 +2145,12 @@ static int ravb_close(struct net_device *ndev) "device will be stopped after h/w processes are done.\n"); /* Clear the timestamp list */ - list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { - list_del(&ts_skb->list); - kfree_skb(ts_skb->skb); - kfree(ts_skb); + if (info->gptp || info->ccc_gac) { + list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { + list_del(&ts_skb->list); + kfree_skb(ts_skb->skb); + kfree(ts_skb); + } } /* PHY disconnect */ @@ -1784,12 +2170,14 @@ static int ravb_close(struct net_device *ndev) } free_irq(ndev->irq, ndev); - napi_disable(&priv->napi[RAVB_NC]); + if (info->nc_queues) + napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); /* Free all the skb's in the RX queue and the DMA buffers. 
*/ ravb_ring_free(ndev, RAVB_BE); - ravb_ring_free(ndev, RAVB_NC); + if (info->nc_queues) + ravb_ring_free(ndev, RAVB_NC); return 0; } @@ -1918,8 +2306,15 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable) spin_unlock_irqrestore(&priv->lock, flags); } -static int ravb_set_features_rx_csum(struct net_device *ndev, - netdev_features_t features) +static int ravb_set_features_gbeth(struct net_device *ndev, + netdev_features_t features) +{ + /* Place holder */ + return 0; +} + +static int ravb_set_features_rcar(struct net_device *ndev, + netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; @@ -1937,7 +2332,7 @@ static int ravb_set_features(struct net_device *ndev, struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; - return info->set_rx_csum_feature(ndev, features); + return info->set_feature(ndev, features); } static const struct net_device_ops ravb_netdev_ops = { @@ -2001,43 +2396,72 @@ static int ravb_mdio_release(struct ravb_private *priv) } static const struct ravb_hw_info ravb_gen3_hw_info = { - .rx_ring_free = ravb_rx_ring_free, - .rx_ring_format = ravb_rx_ring_format, - .alloc_rx_desc = ravb_alloc_rx_desc, - .receive = ravb_rcar_rx, - .set_rate = ravb_set_rate, - .set_rx_csum_feature = ravb_set_features_rx_csum, - .dmac_init = ravb_rcar_dmac_init, - .emac_init = ravb_rcar_emac_init, + .rx_ring_free = ravb_rx_ring_free_rcar, + .rx_ring_format = ravb_rx_ring_format_rcar, + .alloc_rx_desc = ravb_alloc_rx_desc_rcar, + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar, .gstrings_stats = ravb_gstrings_stats, .gstrings_size = sizeof(ravb_gstrings_stats), .net_hw_features = NETIF_F_RXCSUM, .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_buf_size = SZ_2K, .internal_delay = 1, .tx_counters = 1, .multi_irqs = 1, - .ptp_cfg_active = 1, + .ccc_gac = 1, + .nc_queues = 1, + .magic_pkt = 1, }; static const struct ravb_hw_info ravb_gen2_hw_info = { - .rx_ring_free = ravb_rx_ring_free, - .rx_ring_format = ravb_rx_ring_format, - .alloc_rx_desc = ravb_alloc_rx_desc, - .receive = ravb_rcar_rx, - .set_rate = ravb_set_rate, - .set_rx_csum_feature = ravb_set_features_rx_csum, - .dmac_init = ravb_rcar_dmac_init, - .emac_init = ravb_rcar_emac_init, + .rx_ring_free = ravb_rx_ring_free_rcar, + .rx_ring_format = ravb_rx_ring_format_rcar, + .alloc_rx_desc = ravb_alloc_rx_desc_rcar, + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar, .gstrings_stats = ravb_gstrings_stats, .gstrings_size = sizeof(ravb_gstrings_stats), .net_hw_features = NETIF_F_RXCSUM, .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_buf_size = SZ_2K, .aligned_tx = 1, - .no_ptp_cfg_active = 1, + .gptp = 1, + .nc_queues = 1, + .magic_pkt = 1, +}; + +static const struct ravb_hw_info gbeth_hw_info = { + .rx_ring_free = ravb_rx_ring_free_gbeth, + .rx_ring_format = ravb_rx_ring_format_gbeth, + .alloc_rx_desc = ravb_alloc_rx_desc_gbeth, + .receive = ravb_rx_gbeth, + .set_rate = ravb_set_rate_gbeth, + .set_feature = ravb_set_features_gbeth, 
+ .dmac_init = ravb_dmac_init_gbeth, + .emac_init = ravb_emac_init_gbeth, + .gstrings_stats = ravb_gstrings_stats_gbeth, + .gstrings_size = sizeof(ravb_gstrings_stats_gbeth), + .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), + .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN), + .tccr_mask = TCCR_TSRQ0, + .rx_max_buf_size = SZ_8K, + .aligned_tx = 1, + .tx_counters = 1, + .carrier_counters = 1, + .half_duplex = 1, }; static const struct of_device_id ravb_match_table[] = { @@ -2046,6 +2470,7 @@ static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info }, { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info }, { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info }, + { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info }, { } }; MODULE_DEVICE_TABLE(of, ravb_match_table); @@ -2080,13 +2505,15 @@ static void ravb_set_config_mode(struct net_device *ndev) struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; - if (info->no_ptp_cfg_active) { + if (info->gptp) { ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); /* Set CSEL value */ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); - } else { + } else if (info->ccc_gac) { ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); + } else { + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); } } @@ -2192,8 +2619,11 @@ static int ravb_probe(struct platform_device *pdev) priv->pdev = pdev; priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; - priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; - priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; + if (info->nc_queues) { + priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; + priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; + } + priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(priv->addr)) { error = PTR_ERR(priv->addr); @@ -2252,7 +2682,7 @@ static int ravb_probe(struct platform_device *pdev) } clk_prepare_enable(priv->refclk); - ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; /* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer @@ -2269,13 +2699,15 @@ static int ravb_probe(struct platform_device *pdev) /* Set AVB config mode */ ravb_set_config_mode(ndev); - /* Set GTI value */ - error = ravb_set_gti(ndev); - if (error) - goto out_disable_refclk; + if (info->gptp || info->ccc_gac) { + /* Set GTI value */ + error = ravb_set_gti(ndev); + if (error) + goto out_disable_refclk; - /* Request GTI loading */ - ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + /* Request GTI loading */ + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + } if (info->internal_delay) { ravb_parse_delay_mode(np, ndev); @@ -2301,7 +2733,7 @@ static int ravb_probe(struct platform_device *pdev) INIT_LIST_HEAD(&priv->ts_skb_list); /* Initialise PTP Clock driver */ - if (info->ptp_cfg_active) + if (info->ccc_gac) ravb_ptp_init(ndev, pdev); /* Debug message level */ @@ -2323,7 +2755,8 @@ static int ravb_probe(struct platform_device *pdev) } netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); - netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); + if (info->nc_queues) + netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); /* Network device register */ error = register_netdev(ndev); @@ -2341,7 +2774,9 @@ static int ravb_probe(struct platform_device *pdev) return 0; 
out_napi_del: - netif_napi_del(&priv->napi[RAVB_NC]); + if (info->nc_queues) + netif_napi_del(&priv->napi[RAVB_NC]); + netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); out_dma_free: @@ -2349,7 +2784,7 @@ out_dma_free: priv->desc_bat_dma); /* Stop PTP Clock driver */ - if (info->ptp_cfg_active) + if (info->ccc_gac) ravb_ptp_stop(ndev); out_disable_refclk: clk_disable_unprepare(priv->refclk); @@ -2369,7 +2804,7 @@ static int ravb_remove(struct platform_device *pdev) const struct ravb_hw_info *info = priv->info; /* Stop PTP Clock driver */ - if (info->ptp_cfg_active) + if (info->ccc_gac) ravb_ptp_stop(ndev); clk_disable_unprepare(priv->refclk); @@ -2380,7 +2815,8 @@ static int ravb_remove(struct platform_device *pdev) ravb_write(ndev, CCC_OPC_RESET, CCC); pm_runtime_put_sync(&pdev->dev); unregister_netdev(ndev); - netif_napi_del(&priv->napi[RAVB_NC]); + if (info->nc_queues) + netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); @@ -2394,6 +2830,7 @@ static int ravb_remove(struct platform_device *pdev) static int ravb_wol_setup(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; /* Disable interrupts by clearing the interrupt masks. */ ravb_write(ndev, 0, RIC0); @@ -2402,7 +2839,8 @@ static int ravb_wol_setup(struct net_device *ndev) /* Only allow ECI interrupts */ synchronize_irq(priv->emac_irq); - napi_disable(&priv->napi[RAVB_NC]); + if (info->nc_queues) + napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); ravb_write(ndev, ECSIPR_MPDIP, ECSIPR); @@ -2415,9 +2853,11 @@ static int ravb_wol_setup(struct net_device *ndev) static int ravb_wol_restore(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int ret; - napi_enable(&priv->napi[RAVB_NC]); + if (info->nc_queues) + napi_enable(&priv->napi[RAVB_NC]); napi_enable(&priv->napi[RAVB_BE]); /* Disable MagicPacket */ @@ -2468,13 +2908,15 @@ static int __maybe_unused ravb_resume(struct device *dev) /* Set AVB config mode */ ravb_set_config_mode(ndev); - /* Set GTI value */ - ret = ravb_set_gti(ndev); - if (ret) - return ret; + if (info->gptp || info->ccc_gac) { + /* Set GTI value */ + ret = ravb_set_gti(ndev); + if (ret) + return ret; - /* Request GTI loading */ - ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + /* Request GTI loading */ + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + } if (info->internal_delay) ravb_set_delay_mode(ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 1374faa229a2..0a7d23df45f2 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1153,7 +1153,7 @@ static void update_mac_address(struct net_device *ndev) static void read_mac_address(struct net_device *ndev, unsigned char *mac) { if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { - memcpy(ndev->dev_addr, mac, ETH_ALEN); + eth_hw_addr_set(ndev, mac); } else { u32 mahr = sh_eth_read(ndev, MAHR); u32 malr = sh_eth_read(ndev, MALR); diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 3364b6a56bd1..f28c0c36b606 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1954,7 +1954,7 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p) err = rocker_cmd_set_port_settings_macaddr(rocker_port, 
addr->sa_data); if (err) return err; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); return 0; } diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h index 049dc6cf4611..0f45107db8dd 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h @@ -329,7 +329,7 @@ struct sxgbe_core_ops { /* Set power management mode (e.g. magic frame) */ void (*pmt)(void __iomem *ioaddr, unsigned long mode); /* Set/Get Unicast MAC addresses */ - void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr, + void (*set_umac_addr)(void __iomem *ioaddr, const unsigned char *addr, unsigned int reg_n); void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr, unsigned int reg_n); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c index e96e2bd295ef..7d9f257de92a 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c @@ -85,7 +85,8 @@ static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode) } /* Set/Get Unicast MAC addresses */ -static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr, +static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, + const unsigned char *addr, unsigned int reg_n) { u32 high_word, low_word; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index 4639ed9438a3..926532466691 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -118,7 +118,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev) } /* Get MAC address if available (DT) */ - of_get_mac_address(node, priv->dev->dev_addr); + of_get_ethdev_address(node, priv->dev); /* Get the TX/RX IRQ numbers */ for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 37ff25a84030..96065dfc747b 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -167,7 +167,7 @@ static int sgiseeq_set_mac_address(struct net_device *dev, void *addr) struct sgiseeq_private *sp = netdev_priv(dev); struct sockaddr *sa = addr; - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + eth_hw_addr_set(dev, sa->sa_data); spin_lock_irq(&sp->tx_lock); __sgiseeq_set_mac_address(dev); @@ -764,7 +764,7 @@ static int sgiseeq_probe(struct platform_device *pdev) setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS); setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS); - memcpy(dev->dev_addr, pd->mac, ETH_ALEN); + eth_hw_addr_set(dev, pd->mac); #ifdef DEBUG gpriv = sp; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e7e2223aebbf..cf366ed2557c 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1038,7 +1038,7 @@ int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) } int efx_ef10_vport_add_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac) + unsigned int port_id, const u8 *mac) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); @@ -1050,7 +1050,7 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx, } int efx_ef10_vport_del_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac) + unsigned int port_id, const u8 *mac) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); diff 
--git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 518268ce2064..6aa81229b68a 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -1250,7 +1250,7 @@ int ef100_probe_pf(struct efx_nic *efx) if (rc) goto fail; /* Assign MAC address */ - memcpy(net_dev->dev_addr, net_dev->perm_addr, ETH_ALEN); + eth_hw_addr_set(net_dev, net_dev->perm_addr); memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN); return 0; diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 752d6406f07e..7f5aa4a8c451 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -480,7 +480,7 @@ static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id, return rc; } -int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) +int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac) { struct efx_ef10_nic_data *nic_data = efx->nic_data; struct ef10_vf *vf; @@ -523,7 +523,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) goto fail; if (vf->efx) - ether_addr_copy(vf->efx->net_dev->dev_addr, mac); + eth_hw_addr_set(vf->efx->net_dev, mac); } ether_addr_copy(vf->mac, mac); diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index cfe556d17313..3c703ca878b0 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -39,7 +39,7 @@ static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} void efx_ef10_sriov_fini(struct efx_nic *efx); static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {} -int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac); +int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac); int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, u8 qos); @@ -60,9 +60,9 @@ int efx_ef10_vswitching_restore_vf(struct efx_nic *efx); void efx_ef10_vswitching_remove_pf(struct efx_nic *efx); void efx_ef10_vswitching_remove_vf(struct efx_nic *efx); int efx_ef10_vport_add_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac); + unsigned int port_id, const u8 *mac); int efx_ef10_vport_del_mac(struct efx_nic *efx, - unsigned int port_id, u8 *mac); + unsigned int port_id, const u8 *mac); int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id); int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, u32 *port_flags, u32 *vadaptor_flags, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 43ef4f529028..6960a2fe2b53 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -136,7 +136,7 @@ static int efx_probe_port(struct efx_nic *efx) return rc; /* Initialise MAC address to permanent address */ - ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); + eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr); return 0; } diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 896b59253197..f187631b2c5c 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -181,11 +181,11 @@ int efx_set_mac_address(struct net_device *net_dev, void *data) /* save old address */ ether_addr_copy(old_addr, net_dev->dev_addr); - ether_addr_copy(net_dev->dev_addr, new_addr); + eth_hw_addr_set(net_dev, new_addr); if (efx->type->set_mac_address) { rc = efx->type->set_mac_address(efx); if (rc) { - 
ether_addr_copy(net_dev->dev_addr, old_addr); + eth_hw_addr_set(net_dev, old_addr); return rc; } } diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 423bdf81200f..c68837a951f4 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -1044,7 +1044,7 @@ static int ef4_probe_port(struct ef4_nic *efx) return rc; /* Initialise MAC address to permanent address */ - ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); + eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr); return 0; } @@ -2162,11 +2162,11 @@ static int ef4_set_mac_address(struct net_device *net_dev, void *data) /* save old address */ ether_addr_copy(old_addr, net_dev->dev_addr); - ether_addr_copy(net_dev->dev_addr, new_addr); + eth_hw_addr_set(net_dev, new_addr); if (efx->type->set_mac_address) { rc = efx->type->set_mac_address(efx); if (rc) { - ether_addr_copy(net_dev->dev_addr, old_addr); + eth_hw_addr_set(net_dev, old_addr); return rc; } } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index f6981810039d..cc15ee8812d9 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1440,7 +1440,7 @@ struct efx_nic_type { bool (*sriov_wanted)(struct efx_nic *efx); void (*sriov_reset)(struct efx_nic *efx); void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i); - int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac); + int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac); int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan, u8 qos); int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i, diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 83dcfcae3d4b..e9095cf06368 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -1591,7 +1591,7 @@ void efx_fini_sriov(void) destroy_workqueue(vfdi_workqueue); } -int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) +int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac) { struct siena_nic_data *nic_data = efx->nic_data; struct siena_vf *vf; diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h index e441c89c25ce..e548c4daf189 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.h +++ b/drivers/net/ethernet/sfc/siena_sriov.h @@ -46,7 +46,7 @@ bool efx_siena_sriov_wanted(struct efx_nic *efx); void efx_siena_sriov_reset(struct efx_nic *efx); void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); -int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac); +int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac); int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf, u16 vlan, u8 qos); int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf, diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 062f7844c496..e2d009866a7b 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -243,7 +243,7 @@ static int ioc3_set_mac_address(struct net_device *dev, void *addr) struct ioc3_private *ip = netdev_priv(dev); struct sockaddr *sa = addr; - memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + eth_hw_addr_set(dev, sa->sa_data); spin_lock_irq(&ip->ioc3_lock); __ioc3_set_mac_address(dev); @@ -920,7 +920,7 @@ static int ioc3eth_probe(struct platform_device *pdev) ioc3_mii_start(ip); 
ioc3_ssram_disc(ip); - memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + eth_hw_addr_set(dev, mac_addr); /* The IOC3-specific entries in the device structure. */ dev->watchdog_timeo = 5 * HZ; diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index efce834d8ee6..6d850ea2b94c 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -836,7 +836,7 @@ static int meth_probe(struct platform_device *pdev) dev->watchdog_timeo = timeout; dev->irq = MACE_ETHERNET_IRQ; dev->base_addr = (unsigned long)&mace->eth; - memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN); + eth_hw_addr_set(dev, o2meth_eaddr); priv = netdev_priv(dev); priv->pdev = pdev; diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 3d1a18a01ce5..5e66e3f3aafc 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1070,7 +1070,7 @@ static int sis190_open(struct net_device *dev) /* * Rx and Tx descriptors need 256 bytes alignment. - * pci_alloc_consistent() guarantees a stronger alignment. + * dma_alloc_coherent() guarantees a stronger alignment. */ tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES, &tp->tx_dma, GFP_KERNEL); @@ -1586,6 +1586,7 @@ static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; + __le16 addr[ETH_ALEN / 2]; u16 sig; int i; @@ -1606,8 +1607,9 @@ static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, for (i = 0; i < ETH_ALEN / 2; i++) { u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i); - ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w); + addr[i] = cpu_to_le16(w); } + eth_hw_addr_set(dev, (u8 *)addr); sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo)); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 60a0c0e9ded2..3f5717a1874f 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -258,6 +258,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev, { struct sis900_private *sis_priv = netdev_priv(net_dev); void __iomem *ioaddr = sis_priv->ioaddr; + u16 addr[ETH_ALEN / 2]; u16 signature; int i; @@ -271,7 +272,8 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev, /* get MAC address from EEPROM */ for (i = 0; i < 3; i++) - ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); + addr[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); + eth_hw_addr_set(net_dev, (u8 *)addr); return 1; } @@ -331,6 +333,7 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev, { struct sis900_private *sis_priv = netdev_priv(net_dev); void __iomem *ioaddr = sis_priv->ioaddr; + u16 addr[ETH_ALEN / 2]; u32 rfcrSave; u32 i; @@ -345,8 +348,9 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev, /* load MAC addr to filter data register */ for (i = 0 ; i < 3 ; i++) { sw32(rfcr, (i << RFADDR_shift)); - *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr); + addr[i] = sr16(rfdr); } + eth_hw_addr_set(net_dev, (u8 *)addr); /* enable packet filtering */ sw32(rfcr, rfcrSave | RFEN); @@ -375,17 +379,18 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev, { struct sis900_private *sis_priv = netdev_priv(net_dev); void __iomem *ioaddr = sis_priv->ioaddr; + u16 addr[ETH_ALEN / 2]; int wait, rc = 0; sw32(mear, EEREQ); for (wait = 0; wait < 2000; wait++) { if (sr32(mear) & EEGNT) { - u16 *mac = (u16 *)net_dev->dev_addr; int i; /* get MAC address from EEPROM */ for (i = 0; i < 3; i++) - mac[i] = read_eeprom(ioaddr, i 
+ EEPROMMACAddr); + addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr); + eth_hw_addr_set(net_dev, (u8 *)addr); rc = 1; break; @@ -1098,7 +1103,7 @@ sis900_init_rxfilter (struct net_device * net_dev) /* load MAC addr to filter data register */ for (i = 0 ; i < 3 ; i++) { - u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i); + u32 w = (u32) *((const u16 *)(net_dev->dev_addr)+i); sw32(rfcr, i << RFADDR_shift); sw32(rfdr, w); diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 44daf79a8f97..a0654e88444c 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -325,6 +325,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *dev; struct epic_private *ep; int i, ret, option = 0, duplex = 0; + __le16 addr[ETH_ALEN / 2]; void *ring_space; dma_addr_t ring_dma; @@ -416,7 +417,8 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Note: the '175 does not have a serial EEPROM. */ for (i = 0; i < 3; i++) - ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4)); + addr[i] = cpu_to_le16(er16(LAN0 + i*4)); + eth_hw_addr_set(dev, (u8 *)addr); if (debug > 2) { dev_dbg(&pdev->dev, "EEPROM contents:\n"); diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 42fc37c7887a..e5658aa39765 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -666,14 +666,13 @@ static int pcmcia_osi_mac(struct pcmcia_device *p_dev, void *priv) { struct net_device *dev = priv; - int i; if (tuple->TupleDataLen < 8) return -EINVAL; if (tuple->TupleData[0] != 0x04) return -EINVAL; - for (i = 0; i < 6; i++) - dev->dev_addr[i] = tuple->TupleData[i+2]; + + eth_hw_addr_set(dev, &tuple->TupleData[2]); return 0; }; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 199a97339280..73bcc6f2bb6e 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1503,7 +1503,7 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata) /* Sets the device MAC address to dev_addr, called with mac_lock held */ static void -smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6]) +smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, const u8 dev_addr[6]) { u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | @@ -1939,7 +1939,7 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); spin_lock_irq(&pdata->mac_lock); smsc911x_set_hw_mac_address(pdata, dev->dev_addr); @@ -2375,7 +2375,7 @@ static int smsc911x_probe_config(struct smsc911x_platform_config *config, phy_interface = PHY_INTERFACE_MODE_NA; config->phy_interface = phy_interface; - device_get_mac_address(dev, config->mac, ETH_ALEN); + device_get_mac_address(dev, config->mac); err = device_property_read_u32(dev, "reg-io-width", &width); if (err == -ENXIO) @@ -2525,7 +2525,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) SMSC_TRACE(pdata, probe, "MAC Address is specified by configuration"); } else if (is_valid_ether_addr(pdata->config.mac)) { - memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN); + eth_hw_addr_set(dev, pdata->config.mac); SMSC_TRACE(pdata, probe, "MAC Address specified by platform data"); } else { 
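The conversions in the surrounding hunks all follow one pattern: drivers no longer write into net_device::dev_addr directly, so a station address read from EEPROM, platform data, or firmware is first assembled in a local buffer and then installed with eth_hw_addr_set(). A minimal sketch of that pattern follows; read_station_eeprom_word() and its word layout are hypothetical placeholders, not helpers from any driver in this diff.

        #include <linux/etherdevice.h>  /* eth_hw_addr_set(), ETH_ALEN */

        /* Sketch only: read three 16-bit words into a local buffer, then
         * install the address with eth_hw_addr_set() instead of casting and
         * writing through dev->dev_addr.  read_station_eeprom_word() is a
         * hypothetical stand-in for the per-driver EEPROM accessors above.
         */
        static void example_set_mac_from_eeprom(struct net_device *dev,
                                                void __iomem *ioaddr)
        {
                __le16 addr[ETH_ALEN / 2];
                int i;

                for (i = 0; i < ETH_ALEN / 2; i++)
                        addr[i] = cpu_to_le16(read_station_eeprom_word(ioaddr, i));

                eth_hw_addr_set(dev, (u8 *)addr);  /* copies ETH_ALEN bytes */
        }

Where a complete address is already at hand (a sockaddr from .ndo_set_mac_address, platform data, or perm_addr), the same eth_hw_addr_set() call simply replaces the earlier memcpy()/ether_addr_copy() into dev->dev_addr, as the simpler hunks in this series show.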
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index fdbd2a43e267..d207c0b463ab 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -404,7 +404,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = { static void smsc9420_set_mac_address(struct net_device *dev) { struct smsc9420_pdata *pd = netdev_priv(dev); - u8 *dev_addr = dev->dev_addr; + const u8 *dev_addr = dev->dev_addr; u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; @@ -788,7 +788,7 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index) PKT_BUF_SZ, DMA_FROM_DEVICE); if (dma_mapping_error(&pd->pdev->dev, mapping)) { dev_kfree_skb_any(skb); - netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n"); + netif_warn(pd, rx_err, pd->dev, "dma_map_single failed!\n"); return -ENOMEM; } @@ -940,7 +940,7 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb, DMA_TO_DEVICE); if (dma_mapping_error(&pd->pdev->dev, mapping)) { netif_warn(pd, tx_err, pd->dev, - "pci_map_single failed, dropping packet\n"); + "dma_map_single failed, dropping packet\n"); return NETDEV_TX_BUSY; } @@ -1551,7 +1551,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!pd->rx_ring) goto out_free_io_4; - /* descriptors are aligned due to the nature of pci_alloc_consistent */ + /* descriptors are aligned due to the nature of dma_alloc_coherent */ pd->tx_ring = (pd->rx_ring + RX_RING_SIZE); pd->tx_dma_addr = pd->rx_dma_addr + sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 1f46af136aa8..baa9f5d1c549 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1860,10 +1860,9 @@ static int netsec_of_probe(struct platform_device *pdev, *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np); priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ - if (IS_ERR(priv->clk)) { - dev_err(&pdev->dev, "phy_ref_clk not found\n"); - return PTR_ERR(priv->clk); - } + if (IS_ERR(priv->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), + "phy_ref_clk not found\n"); priv->freq = clk_get_rate(priv->clk); return 0; @@ -1886,19 +1885,17 @@ static int netsec_acpi_probe(struct platform_device *pdev, priv->phy_interface = PHY_INTERFACE_MODE_NA; ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr); - if (ret) { - dev_err(&pdev->dev, - "missing required property 'phy-channel'\n"); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, + "missing required property 'phy-channel'\n"); ret = device_property_read_u32(&pdev->dev, "socionext,phy-clock-frequency", &priv->freq); if (ret) - dev_err(&pdev->dev, - "missing required property 'socionext,phy-clock-frequency'\n"); - return ret; + return dev_err_probe(&pdev->dev, ret, + "missing required property 'socionext,phy-clock-frequency'\n"); + return 0; } static void netsec_unregister_mdio(struct netsec_priv *priv) @@ -1981,7 +1978,6 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) static int netsec_probe(struct platform_device *pdev) { struct resource *mmio_res, *eeprom_res, *irq_res; - u8 *mac, macbuf[ETH_ALEN]; struct netsec_priv *priv; u32 hw_ver, phy_addr = 0; struct net_device *ndev; @@ -2037,12 +2033,8 @@ static int netsec_probe(struct platform_device *pdev) goto free_ndev; } - mac 
= device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf)); - if (mac) - ether_addr_copy(ndev->dev_addr, mac); - - if (priv->eeprom_base && - (!mac || !is_valid_ether_addr(ndev->dev_addr))) { + ret = device_get_ethdev_address(&pdev->dev, ndev); + if (ret && priv->eeprom_base) { void __iomem *macp = priv->eeprom_base + NETSEC_EEPROM_MAC_ADDRESS; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index ae31ed93aaf0..4b0fe0f58bbf 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1599,7 +1599,7 @@ static int ave_probe(struct platform_device *pdev) ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN); - ret = of_get_mac_address(np, ndev->dev_addr); + ret = of_get_ethdev_address(np, ndev); if (ret) { /* if the mac address is invalid, use random mac address */ eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b6d945ea903d..9160f9ed363a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -546,13 +546,13 @@ int dwmac4_setup(struct stmmac_priv *priv); int dwxgmac2_setup(struct stmmac_priv *priv); int dwxlgmac2_setup(struct stmmac_priv *priv); -void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], +void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], unsigned int high, unsigned int low); void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int high, unsigned int low); void stmmac_set_mac(void __iomem *ioaddr, bool enable); -void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6], +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], unsigned int high, unsigned int low); void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int high, unsigned int low); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 4422baeed3d8..617d0e4c6495 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -634,7 +634,7 @@ static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable) * If addr is NULL, clear the slot */ static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw, - unsigned char *addr, + const unsigned char *addr, unsigned int reg_n) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index d046e33b8a29..66fc8be34bb7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -171,10 +171,9 @@ static int visconti_eth_clock_probe(struct platform_device *pdev, int err; dwmac->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); - if (IS_ERR(dwmac->phy_ref_clk)) { - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); - return PTR_ERR(dwmac->phy_ref_clk); - } + if (IS_ERR(dwmac->phy_ref_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->phy_ref_clk), + "phy_ref_clk clock not found.\n"); err = clk_prepare_enable(dwmac->phy_ref_clk); if (err < 0) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index fc8759f146c7..76edb9b72675 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -104,7 +104,7 @@ 
static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space) } static void dwmac1000_set_umac_addr(struct mac_device_info *hw, - unsigned char *addr, + const unsigned char *addr, unsigned int reg_n) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index ebcad8dd99db..75071a7d551a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -68,7 +68,7 @@ static int dwmac100_irq_status(struct mac_device_info *hw, } static void dwmac100_set_umac_addr(struct mac_device_info *hw, - unsigned char *addr, + const unsigned char *addr, unsigned int reg_n) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index b21745368983..fd41db65fe1d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -322,7 +322,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) } static void dwmac4_set_umac_addr(struct mac_device_info *hw, - unsigned char *addr, unsigned int reg_n) + const unsigned char *addr, unsigned int reg_n) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 9292a1fab7d3..d1c605777985 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -187,7 +187,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, return ret; } -void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6], +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], unsigned int high, unsigned int low) { unsigned long data; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index d1c31200bb91..caa4bfc4c1d6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -239,7 +239,7 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr) do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); } -void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], +void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], unsigned int high, unsigned int low) { unsigned long data; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index c4d78fa93663..c6c4d7948fe5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -335,7 +335,8 @@ static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode) } static void dwxgmac2_set_umac_addr(struct mac_device_info *hw, - unsigned char *addr, unsigned int reg_n) + const unsigned char *addr, + unsigned int reg_n) { void __iomem *ioaddr = hw->pcsr; u32 value; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index fe2660d5694d..f7dc447f05a0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -330,7 +330,8 @@ struct stmmac_ops { /* Set power management mode (e.g. 
magic frame) */ void (*pmt)(struct mac_device_info *hw, unsigned long mode); /* Set/Get Unicast MAC addresses */ - void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + void (*set_umac_addr)(struct mac_device_info *hw, + const unsigned char *addr, unsigned int reg_n); void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, unsigned int reg_n); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index eb3b7bf771d7..b720539ccb0a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3510,6 +3510,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) /* Request Rx MSI irq */ for (i = 0; i < priv->plat->rx_queues_to_use; i++) { + if (i >= MTL_MAX_RX_QUEUES) + break; if (priv->rx_irq[i] == 0) continue; @@ -3533,6 +3535,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) /* Request Tx MSI irq */ for (i = 0; i < priv->plat->tx_queues_to_use; i++) { + if (i >= MTL_MAX_TX_QUEUES) + break; if (priv->tx_irq[i] == 0) continue; @@ -6815,7 +6819,7 @@ int stmmac_dvr_probe(struct device *device, priv->tx_irq[i] = res->tx_irq[i]; if (!is_zero_ether_addr(res->mac)) - memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); + eth_hw_addr_set(priv->dev, res->mac); dev_set_drvdata(device, priv->dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index 0462dcc93e53..be3cb63675a5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -36,7 +36,7 @@ struct stmmac_packet_attrs { int vlan_id_in; int vlan_id_out; unsigned char *src; - unsigned char *dst; + const unsigned char *dst; u32 ip_src; u32 ip_dst; int tcp; @@ -249,8 +249,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, struct net_device *orig_ndev) { struct stmmac_test_priv *tpriv = pt->af_packet_priv; + const unsigned char *dst = tpriv->packet->dst; unsigned char *src = tpriv->packet->src; - unsigned char *dst = tpriv->packet->dst; struct stmmachdr *shdr; struct ethhdr *ehdr; struct udphdr *uhdr; @@ -1104,13 +1104,13 @@ static int stmmac_test_rxp(struct stmmac_priv *priv) goto cleanup_sel; } - actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL); + actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL); if (!actions) { ret = -ENOMEM; goto cleanup_exts; } - act = kzalloc(nk * sizeof(*act), GFP_KERNEL); + act = kcalloc(nk, sizeof(*act), GFP_KERNEL); if (!act) { ret = -ENOMEM; goto cleanup_actions; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 287ae4c538aa..d2d4f47c7e28 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -3027,7 +3027,7 @@ static void cas_mac_reset(struct cas *cp) /* Must be invoked under cp->lock. */ static void cas_init_mac(struct cas *cp) { - unsigned char *e = &cp->dev->dev_addr[0]; + const unsigned char *e = &cp->dev->dev_addr[0]; int i; cas_mac_reset(cp); @@ -3379,6 +3379,7 @@ static void cas_check_pci_invariants(struct cas *cp) static int cas_check_invariants(struct cas *cp) { struct pci_dev *pdev = cp->pdev; + u8 addr[ETH_ALEN]; u32 cfg; int i; @@ -3407,8 +3408,8 @@ static int cas_check_invariants(struct cas *cp) /* finish phy determination. MDIO1 takes precedence over MDIO0 if * they're both connected. 
*/ - cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, - PCI_SLOT(pdev->devfn)); + cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn)); + eth_hw_addr_set(cp->dev, addr); if (cp->phy_type & CAS_PHY_SERDES) { cp->cas_flags |= CAS_FLAG_1000MB_CAP; return 0; /* no more checking needed */ diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 50bd4e3b0af9..6b59b14e74b1 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -230,7 +230,6 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[], { struct net_device *dev; struct vnet_port *port; - int i; dev = alloc_etherdev_mqs(sizeof(*port), VNET_MAX_TXQS, 1); if (!dev) @@ -238,10 +237,8 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[], dev->needed_headroom = VNET_PACKET_SKIP + 8; dev->needed_tailroom = 8; - for (i = 0; i < ETH_ALEN; i++) { - dev->dev_addr[i] = hwaddr[i]; - dev->perm_addr[i] = dev->dev_addr[i]; - } + eth_hw_addr_set(dev, hwaddr); + ether_addr_copy(dev->perm_addr, dev->dev_addr); sprintf(dev->name, "vif%d.%d", (int)handle, (int)port_id); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index a68a01d1b2b1..ba8ad76313a9 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -2603,7 +2603,7 @@ static int niu_init_link(struct niu *np) return 0; } -static void niu_set_primary_mac(struct niu *np, unsigned char *addr) +static void niu_set_primary_mac(struct niu *np, const unsigned char *addr) { u16 reg0 = addr[4] << 8 | addr[5]; u16 reg1 = addr[2] << 8 | addr[3]; @@ -6386,7 +6386,7 @@ static int niu_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(dev, addr->sa_data); if (!netif_running(dev)) return 0; @@ -8312,6 +8312,7 @@ static void niu_pci_vpd_validate(struct niu *np) { struct net_device *dev = np->dev; struct niu_vpd *vpd = &np->vpd; + u8 addr[ETH_ALEN]; u8 val8; if (!is_valid_ether_addr(&vpd->local_mac[0])) { @@ -8344,17 +8345,20 @@ static void niu_pci_vpd_validate(struct niu *np) return; } - memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN); + ether_addr_copy(addr, vpd->local_mac); - val8 = dev->dev_addr[5]; - dev->dev_addr[5] += np->port; - if (dev->dev_addr[5] < val8) - dev->dev_addr[4]++; + val8 = addr[5]; + addr[5] += np->port; + if (addr[5] < val8) + addr[4]++; + + eth_hw_addr_set(dev, addr); } static int niu_pci_probe_sprom(struct niu *np) { struct net_device *dev = np->dev; + u8 addr[ETH_ALEN]; int len, i; u64 val, sum; u8 val8; @@ -8446,27 +8450,29 @@ static int niu_pci_probe_sprom(struct niu *np) val = nr64(ESPC_MAC_ADDR0); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); - dev->dev_addr[0] = (val >> 0) & 0xff; - dev->dev_addr[1] = (val >> 8) & 0xff; - dev->dev_addr[2] = (val >> 16) & 0xff; - dev->dev_addr[3] = (val >> 24) & 0xff; + addr[0] = (val >> 0) & 0xff; + addr[1] = (val >> 8) & 0xff; + addr[2] = (val >> 16) & 0xff; + addr[3] = (val >> 24) & 0xff; val = nr64(ESPC_MAC_ADDR1); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); - dev->dev_addr[4] = (val >> 0) & 0xff; - dev->dev_addr[5] = (val >> 8) & 0xff; + addr[4] = (val >> 0) & 0xff; + addr[5] = (val >> 8) & 0xff; - if (!is_valid_ether_addr(&dev->dev_addr[0])) { + if (!is_valid_ether_addr(addr)) { dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", - dev->dev_addr); + addr); return 
-EINVAL; } - val8 = dev->dev_addr[5]; - dev->dev_addr[5] += np->port; - if (dev->dev_addr[5] < val8) - dev->dev_addr[4]++; + val8 = addr[5]; + addr[5] += np->port; + if (addr[5] < val8) + addr[4]++; + + eth_hw_addr_set(dev, addr); val = nr64(ESPC_MOD_STR_LEN); netif_printk(np, probe, KERN_DEBUG, np->dev, @@ -9235,7 +9241,7 @@ static int niu_get_of_props(struct niu *np) netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n", dp, prop_len); } - memcpy(dev->dev_addr, mac_addr, dev->addr_len); + eth_hw_addr_set(dev, mac_addr); if (!is_valid_ether_addr(&dev->dev_addr[0])) { netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp); netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr); diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index c646575e79d5..531a6f449afa 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -623,7 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, bool non_blocking) void __iomem *cregs = bp->creg; void __iomem *bregs = bp->bregs; __u32 bblk_dvma = (__u32)bp->bblock_dvma; - unsigned char *e = &bp->dev->dev_addr[0]; + const unsigned char *e = &bp->dev->dev_addr[0]; /* Latch current counters into statistics. */ bigmac_get_counters(bp, bregs); @@ -1076,7 +1076,6 @@ static int bigmac_ether_init(struct platform_device *op, struct net_device *dev; u8 bsizes, bsizes_more; struct bigmac *bp; - int i; /* Get a new device struct for this interface. */ dev = alloc_etherdev(sizeof(struct bigmac)); @@ -1086,8 +1085,7 @@ static int bigmac_ether_init(struct platform_device *op, if (version_printed++ == 0) printk(KERN_INFO "%s", version); - for (i = 0; i < 6; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; + eth_hw_addr_set(dev, idprom->id_ethaddr); /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. 
*/ bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index d72018a60c0f..036856102c50 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -1810,7 +1810,7 @@ static u32 gem_setup_multicast(struct gem *gp) static void gem_init_mac(struct gem *gp) { - unsigned char *e = &gp->dev->dev_addr[0]; + const unsigned char *e = &gp->dev->dev_addr[0]; writel(0x1bf0, gp->regs + MAC_SNDPAUSE); @@ -2087,7 +2087,7 @@ static void gem_stop_phy(struct gem *gp, int wol) writel(mifcfg, gp->regs + MIF_CFG); if (wol && gp->has_wol) { - unsigned char *e = &gp->dev->dev_addr[0]; + const unsigned char *e = &gp->dev->dev_addr[0]; u32 csr; /* Setup wake-on-lan for MAGIC packet */ @@ -2431,13 +2431,13 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) static int gem_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *macaddr = (struct sockaddr *) addr; + const unsigned char *e = &dev->dev_addr[0]; struct gem *gp = netdev_priv(dev); - unsigned char *e = &dev->dev_addr[0]; if (!is_valid_ether_addr(macaddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, macaddr->sa_data); /* We'll just catch it later when the device is up'd or resumed */ if (!netif_running(dev) || !netif_device_present(dev)) @@ -2797,9 +2797,12 @@ static int gem_get_device_address(struct gem *gp) return -1; #endif } - memcpy(dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(dev, addr); #else - get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); + u8 addr[ETH_ALEN]; + + get_gem_mac_nonobp(gp->pdev, addr); + eth_hw_addr_set(gp->dev, addr); #endif return 0; } diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 62f81b0d14ed..ad9029ae6848 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1395,13 +1395,13 @@ force_link: /* hp->happy_lock must be held */ static int happy_meal_init(struct happy_meal *hp) { + const unsigned char *e = &hp->dev->dev_addr[0]; void __iomem *gregs = hp->gregs; void __iomem *etxregs = hp->etxregs; void __iomem *erxregs = hp->erxregs; void __iomem *bregs = hp->bigmacregs; void __iomem *tregs = hp->tcvregs; u32 regtmp, rxcfg; - unsigned char *e = &hp->dev->dev_addr[0]; /* If auto-negotiation timer is running, kill it. 
*/ del_timer(&hp->happy_timer); @@ -2661,6 +2661,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) struct happy_meal *hp; struct net_device *dev; int i, qfe_slot = -1; + u8 addr[ETH_ALEN]; int err = -ENODEV; sbus_dp = op->dev.parent->of_node; @@ -2698,7 +2699,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) } if (i < 6) { /* a mac address was given */ for (i = 0; i < 6; i++) - dev->dev_addr[i] = macaddr[i]; + addr[i] = macaddr[i]; + eth_hw_addr_set(dev, addr); macaddr[5]++; } else { const unsigned char *addr; @@ -2707,9 +2709,9 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) addr = of_get_property(dp, "local-mac-address", &len); if (qfe_slot != -1 && addr && len == ETH_ALEN) - memcpy(dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(dev, addr); else - memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); + eth_hw_addr_set(dev, idprom->id_ethaddr); } hp = netdev_priv(dev); @@ -2969,6 +2971,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, unsigned long hpreg_res; int i, qfe_slot = -1; char prom_name[64]; + u8 addr[ETH_ALEN]; int err; /* Now make sure pci_dev cookie is there. */ @@ -3044,7 +3047,8 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, } if (i < 6) { /* a mac address was given */ for (i = 0; i < 6; i++) - dev->dev_addr[i] = macaddr[i]; + addr[i] = macaddr[i]; + eth_hw_addr_set(dev, addr); macaddr[5]++; } else { #ifdef CONFIG_SPARC @@ -3055,12 +3059,15 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, (addr = of_get_property(dp, "local-mac-address", &len)) != NULL && len == 6) { - memcpy(dev->dev_addr, addr, ETH_ALEN); + eth_hw_addr_set(dev, addr); } else { - memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); + eth_hw_addr_set(dev, idprom->id_ethaddr); } #else - get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]); + u8 addr[ETH_ALEN]; + + get_hme_mac_nonsparc(pdev, addr); + eth_hw_addr_set(dev, addr); #endif } diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 577cd9753d8e..efe0d33f6024 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -144,7 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq) void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; void __iomem *gregs = qecp->gregs; - unsigned char *e = &qep->dev->dev_addr[0]; + const unsigned char *e = &qep->dev->dev_addr[0]; __u32 qblk_dvma = (__u32)qep->qblock_dvma; u32 tmp; int i; @@ -844,7 +844,7 @@ static int qec_ether_init(struct platform_device *op) if (!dev) return -ENOMEM; - memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); + eth_hw_addr_set(dev, idprom->id_ethaddr); qe = netdev_priv(dev); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c index df26cea45904..5c9b6c90942b 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c @@ -78,7 +78,7 @@ static int xlgmac_init(struct xlgmac_pdata *pdata) netdev->irq = pdata->dev_irq; netdev->base_addr = (unsigned long)pdata->mac_regs; xlgmac_read_mac_addr(pdata); - memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len); + eth_hw_addr_set(netdev, pdata->mac_addr); /* Set all the function pointers */ xlgmac_init_all_ops(pdata); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c index bf6c1c6779ff..76eb7db80f13 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c +++ 
b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c @@ -57,7 +57,7 @@ static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata) return 0; } -static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr) +static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, const u8 *addr) { unsigned int mac_addr_hi, mac_addr_lo; diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 1db7104fef3a..d435519236e4 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -798,7 +798,7 @@ static int xlgmac_set_mac_address(struct net_device *netdev, void *addr) if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, saddr->sa_data); hw_ops->set_mac_address(pdata, netdev->dev_addr); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h index 8598aaf3ec99..98e3a271e017 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h @@ -410,7 +410,7 @@ struct xlgmac_hw_ops { void (*dev_xmit)(struct xlgmac_channel *channel); int (*dev_read)(struct xlgmac_channel *channel); - int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr); + int (*set_mac_address)(struct xlgmac_pdata *pdata, const u8 *addr); int (*config_rx_mode)(struct xlgmac_pdata *pdata); int (*enable_rx_csum)(struct xlgmac_pdata *pdata); int (*disable_rx_csum)(struct xlgmac_pdata *pdata); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 6b409f9c5863..3e8a3fde8302 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -832,7 +832,7 @@ static int bdx_set_mac(struct net_device *ndev, void *p) if (netif_running(dev)) return -EBUSY */ - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, addr->sa_data); bdx_restore_mac(ndev, priv); RET(0); } diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index 6e4d4f9e32e0..b05de9b61ad6 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -61,7 +61,7 @@ struct am65_cpsw_regdump_item { #define AM65_CPSW_REGDUMP_REC(mod, start, end) { \ .hdr.module_id = (mod), \ - .hdr.len = (((u32 *)(end)) - ((u32 *)(start)) + 1) * sizeof(u32) * 2 + \ + .hdr.len = (end + 4 - start) * 2 + \ sizeof(struct am65_cpsw_regdump_hdr), \ .start_ofs = (start), \ .end_ofs = end, \ diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 130346f74ee8..c092cb61416a 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1918,7 +1918,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) port->port_id, port->slave.mac_addr); if (!is_valid_ether_addr(port->slave.mac_addr)) { - random_ether_addr(port->slave.mac_addr); + eth_random_addr(port->slave.mac_addr); dev_err(dev, "Use random MAC address\n"); } } @@ -1970,7 +1970,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) ndev_priv->msg_enable = AM65_CPSW_DEBUG; SET_NETDEV_DEV(port->ndev, dev); - ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr); + eth_hw_addr_set(port->ndev, port->slave.mac_addr); port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE; port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE; @@ -2429,12 
+2429,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) dl_priv = devlink_priv(common->devlink); dl_priv->common = common; - ret = devlink_register(common->devlink); - if (ret) { - dev_err(dev, "devlink reg fail ret:%d\n", ret); - goto dl_free; - } - /* Provide devlink hook to switch mode when multiple external ports * are present NUSS switchdev driver is enabled. */ @@ -2447,7 +2441,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) dev_err(dev, "devlink params reg fail ret:%d\n", ret); goto dl_unreg; } - devlink_params_publish(common->devlink); } for (i = 1; i <= common->port_num; i++) { @@ -2468,7 +2461,7 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) } devlink_port_type_eth_set(dl_port, port->ndev); } - + devlink_register(common->devlink); return ret; dl_port_unreg: @@ -2479,10 +2472,7 @@ dl_port_unreg: devlink_port_unregister(dl_port); } dl_unreg: - devlink_unregister(common->devlink); -dl_free: devlink_free(common->devlink); - return ret; } @@ -2492,6 +2482,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) struct am65_cpsw_port *port; int i; + devlink_unregister(common->devlink); + for (i = 1; i <= common->port_num; i++) { port = am65_common_get_port(common, i); dl_port = &port->devlink_port; @@ -2500,13 +2492,11 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) } if (!AM65_CPSW_IS_CPSW2G(common) && - IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) { - devlink_params_unpublish(common->devlink); - devlink_params_unregister(common->devlink, am65_cpsw_devlink_params, + IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) + devlink_params_unregister(common->devlink, + am65_cpsw_devlink_params, ARRAY_SIZE(am65_cpsw_devlink_params)); - } - devlink_unregister(common->devlink); devlink_free(common->devlink); } diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 02d4e51f7306..7449436fc87c 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1112,7 +1112,7 @@ static int cpmac_probe(struct platform_device *pdev) priv->dev = dev; priv->ring_size = 64; priv->msg_enable = netif_msg_init(debug_level, 0xff); - memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr)); + eth_hw_addr_set(dev, pdata->dev_addr); snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 66f7ddd9b1f9..33142d505fc8 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -985,7 +985,7 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) flags, vid); memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); - memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, priv->mac_addr); for_each_slave(priv, cpsw_set_slave_mac, priv); pm_runtime_put(cpsw->dev); @@ -1460,7 +1460,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); } - memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, priv_sl2->mac_addr); priv_sl2->emac_port = 1; cpsw->slaves[1].ndev = ndev; @@ -1639,7 +1639,7 @@ static int cpsw_probe(struct platform_device *pdev) dev_info(dev, "Random MACID = %pM\n", priv->mac_addr); } - memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, priv->mac_addr); cpsw->slaves[0].ndev = ndev; diff --git a/drivers/net/ethernet/ti/cpsw_new.c 
b/drivers/net/ethernet/ti/cpsw_new.c index 7968f24d99c8..279e261e4720 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1000,7 +1000,7 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) flags, vid); ether_addr_copy(priv->mac_addr, addr->sa_data); - ether_addr_copy(ndev->dev_addr, priv->mac_addr); + eth_hw_addr_set(ndev, priv->mac_addr); cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv); pm_runtime_put(cpsw->dev); @@ -1401,7 +1401,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) dev_info(cpsw->dev, "Random MACID = %pM\n", priv->mac_addr); } - ether_addr_copy(ndev->dev_addr, slave_data->mac_addr); + eth_hw_addr_set(ndev, slave_data->mac_addr); ether_addr_copy(priv->mac_addr, slave_data->mac_addr); cpsw->slaves[i].ndev = ndev; @@ -1810,12 +1810,6 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw) dl_priv = devlink_priv(cpsw->devlink); dl_priv->cpsw = cpsw; - ret = devlink_register(cpsw->devlink); - if (ret) { - dev_err(dev, "DL reg fail ret:%d\n", ret); - goto dl_free; - } - ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params, ARRAY_SIZE(cpsw_devlink_params)); if (ret) { @@ -1823,22 +1817,19 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw) goto dl_unreg; } - devlink_params_publish(cpsw->devlink); + devlink_register(cpsw->devlink); return ret; dl_unreg: - devlink_unregister(cpsw->devlink); -dl_free: devlink_free(cpsw->devlink); return ret; } static void cpsw_unregister_devlink(struct cpsw_common *cpsw) { - devlink_params_unpublish(cpsw->devlink); + devlink_unregister(cpsw->devlink); devlink_params_unregister(cpsw->devlink, cpsw_devlink_params, ARRAY_SIZE(cpsw_devlink_params)); - devlink_unregister(cpsw->devlink); devlink_free(cpsw->devlink); } diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 43222a34cba0..dc70a6bfaa6a 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -669,10 +669,10 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) goto mux_fail; } - parent_names = devm_kzalloc(cpts->dev, (sizeof(char *) * num_parents), - GFP_KERNEL); + parent_names = devm_kcalloc(cpts->dev, num_parents, + sizeof(*parent_names), GFP_KERNEL); - mux_table = devm_kzalloc(cpts->dev, sizeof(*mux_table) * num_parents, + mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table), GFP_KERNEL); if (!mux_table || !parent_names) { ret = -ENOMEM; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index e8291d848839..2d2dcf70563f 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1132,7 +1132,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) /* Store mac addr in priv and rx channel and set it in EMAC hw */ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); - memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); + eth_hw_addr_set(ndev, sa->sa_data); /* MAC address is configured only after the interface is enabled. 
*/ if (netif_running(ndev)) { @@ -1402,7 +1402,6 @@ static int match_first_device(struct device *dev, const void *data) static int emac_dev_open(struct net_device *ndev) { struct device *emac_dev = &ndev->dev; - u32 cnt; struct resource *res; int q, m, ret; int res_num = 0, irq_num = 0; @@ -1420,8 +1419,7 @@ static int emac_dev_open(struct net_device *ndev) } netif_carrier_off(ndev); - for (cnt = 0; cnt < ETH_ALEN; cnt++) - ndev->dev_addr[cnt] = priv->mac_addr[cnt]; + eth_hw_addr_set(ndev, priv->mac_addr); /* Configuration items */ priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN; @@ -1899,7 +1897,7 @@ static int davinci_emac_probe(struct platform_device *pdev) rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr); if (!rc) - ether_addr_copy(ndev->dev_addr, priv->mac_addr); + eth_hw_addr_set(ndev, priv->mac_addr); if (!is_valid_ether_addr(priv->mac_addr)) { /* Use random MAC if still none obtained. */ diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index eda2961c0fe2..b818e4579f6f 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2028,16 +2028,16 @@ static int netcp_create_interface(struct netcp_device *netcp_device, emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac); if (is_valid_ether_addr(efuse_mac_addr)) - ether_addr_copy(ndev->dev_addr, efuse_mac_addr); + eth_hw_addr_set(ndev, efuse_mac_addr); else - eth_random_addr(ndev->dev_addr); + eth_hw_addr_random(ndev); devm_iounmap(dev, efuse); devm_release_mem_region(dev, res.start, size); } else { - ret = of_get_mac_address(node_interface, ndev->dev_addr); + ret = of_get_ethdev_address(node_interface, ndev); if (ret) - eth_random_addr(ndev->dev_addr); + eth_hw_addr_random(ndev); } ret = of_property_read_string(node_interface, "rx-channel", diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 77c448ad67ce..eab7d78d7c72 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -184,7 +184,7 @@ static void tlan_print_list(struct tlan_list *, char *, int); static void tlan_read_and_clear_stats(struct net_device *, int); static void tlan_reset_adapter(struct net_device *); static void tlan_finish_reset(struct net_device *); -static void tlan_set_mac(struct net_device *, int areg, char *mac); +static void tlan_set_mac(struct net_device *, int areg, const char *mac); static void __tlan_phy_print(struct net_device *); static void tlan_phy_print(struct net_device *); @@ -2346,7 +2346,7 @@ tlan_finish_reset(struct net_device *dev) * **************************************************************/ -static void tlan_set_mac(struct net_device *dev, int areg, char *mac) +static void tlan_set_mac(struct net_device *dev, int areg, const char *mac) { int i; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 55e652624bd7..3dbfb1b20649 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -1477,7 +1477,7 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card) __func__, status); return -EINVAL; } - memcpy(netdev->dev_addr, &v1, ETH_ALEN); + eth_hw_addr_set(netdev, (u8 *)&v1); if (card->vlan_required) { netdev->hard_header_len += VLAN_HLEN; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 66d4e024d11e..f50f9a43d3ea 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ 
b/drivers/net/ethernet/toshiba/spider_net.c @@ -1296,7 +1296,7 @@ spider_net_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(netdev, addr->sa_data); /* switch off GMACTPE and GMACRPE */ regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD); diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 52245ac60fc7..f8b9d10dc056 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -708,7 +708,7 @@ static int tc35815_read_plat_dev_addr(struct net_device *dev) lp->pci_dev, tc35815_mac_match); if (pd) { if (pd->platform_data) - memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN); + eth_hw_addr_set(dev, pd->platform_data); put_device(pd); return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV; } @@ -1859,7 +1859,8 @@ static struct net_device_stats *tc35815_get_stats(struct net_device *dev) return &dev->stats; } -static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr) +static void tc35815_set_cam_entry(struct net_device *dev, int index, + const unsigned char *addr) { struct tc35815_local *lp = netdev_priv(dev); struct tc35815_regs __iomem *tr = diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index f974e70a82e8..88fc65ed0017 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -985,7 +985,7 @@ static int w5100_set_macaddr(struct net_device *ndev, void *addr) if (!is_valid_ether_addr(sock_addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, sock_addr->sa_data); w5100_write_macaddr(priv); return 0; } @@ -1155,7 +1155,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, INIT_WORK(&priv->restart_work, w5100_restart_work); if (mac_addr) - memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, mac_addr); else eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 46aae30c4636..402d5036f266 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -472,7 +472,7 @@ static int w5300_set_macaddr(struct net_device *ndev, void *addr) if (!is_valid_ether_addr(sock_addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, sock_addr->sa_data); w5300_write_macaddr(priv); return 0; } @@ -534,7 +534,7 @@ static int w5300_hw_probe(struct platform_device *pdev) int ret; if (data && is_valid_ether_addr(data->mac_addr)) { - memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); + eth_hw_addr_set(ndev, data->mac_addr); } else { eth_hw_addr_random(ndev); } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 463094ced104..e7065c9a8e38 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -438,7 +438,7 @@ static void temac_do_set_mac_address(struct net_device *ndev) static int temac_init_mac_address(struct net_device *ndev, const void *address) { - memcpy(ndev->dev_addr, address, ETH_ALEN); + eth_hw_addr_set(ndev, address); if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); temac_do_set_mac_address(ndev); @@ -451,7 +451,7 @@ static int temac_set_mac_address(struct net_device *ndev, void *p) if 
(!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, addr->sa_data); temac_do_set_mac_address(ndev); return 0; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 871b5ec3183d..0b7606987c1e 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -360,7 +360,7 @@ static void axienet_set_mac_address(struct net_device *ndev, struct axienet_local *lp = netdev_priv(ndev); if (address) - memcpy(ndev->dev_addr, address, ETH_ALEN); + eth_hw_addr_set(ndev, address); if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index b780aad3550a..0815de581c7f 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -206,12 +206,13 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) * This function writes data from a 16-bit aligned buffer to a 32-bit aligned * address in the EmacLite device. */ -static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, +static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr, unsigned length) { + const u16 *from_u16_ptr; u32 align_buffer; u32 *to_u32_ptr; - u16 *from_u16_ptr, *to_u16_ptr; + u16 *to_u16_ptr; to_u32_ptr = dest_ptr; from_u16_ptr = src_ptr; @@ -470,7 +471,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) * buffers (if configured). */ static void xemaclite_update_address(struct net_local *drvdata, - u8 *address_ptr) + const u8 *address_ptr) { void __iomem *addr; u32 reg_data; @@ -511,7 +512,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address) if (netif_running(dev)) return -EBUSY; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); xemaclite_update_address(lp, dev->dev_addr); return 0; } @@ -1157,7 +1158,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); - rc = of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr); + rc = of_get_ethdev_address(ofdev->dev.of_node, ndev); if (rc) { dev_warn(dev, "No MAC address found, using random\n"); eth_hw_addr_random(ndev); diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index ae611e46da6a..ab513dcc3b22 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1271,7 +1271,7 @@ struct set_address_info { unsigned int ioaddr; }; -static void set_address(struct set_address_info *sa_info, char *addr) +static void set_address(struct set_address_info *sa_info, const char *addr) { unsigned int ioaddr = sa_info->ioaddr; int i; diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 931494cc1c39..3e5fd952aeaa 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1524,7 +1524,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) port->plat = plat; npe_port_tab[NPE_ID(port->id)] = port; - memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN); + eth_hw_addr_set(ndev, plat->hwaddr); platform_set_drvdata(pdev, ndev); |
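Most of the hunks above convert drivers from writing netdev->dev_addr directly (memcpy(), ether_addr_copy(), byte-wise loops) to the eth_hw_addr_set(), eth_hw_addr_random() and of_get_ethdev_address() helpers, assembling the address in a local buffer first where the driver builds it byte by byte, so that dev_addr itself can be treated as read-only. A minimal sketch of that pattern follows; only the <linux/etherdevice.h> helpers are real, and everything named example_* is hypothetical, not taken from any driver in this series.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_init_hw_addr(struct net_device *ndev, u32 lo, u16 hi)
{
	u8 addr[ETH_ALEN];

	/* Build the address in a local scratch buffer ... */
	addr[0] = lo & 0xff;
	addr[1] = (lo >> 8) & 0xff;
	addr[2] = (lo >> 16) & 0xff;
	addr[3] = (lo >> 24) & 0xff;
	addr[4] = hi & 0xff;
	addr[5] = (hi >> 8) & 0xff;

	if (is_valid_ether_addr(addr))
		/* ... then install it via the helper instead of memcpy(). */
		eth_hw_addr_set(ndev, addr);
	else
		/* Fall back to a random, locally administered address. */
		eth_hw_addr_random(ndev);
}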
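A related theme is const-qualifying the address argument of the low-level MAC programming helpers (set_umac_addr(), stmmac_set_mac_addr(), niu_set_primary_mac(), tlan_set_mac(), tc35815_set_cam_entry(), set_address(), ...), since those routines only read the bytes they are given. A hedged sketch of such a helper; the register offsets below are invented for illustration and do not belong to any of the MACs above.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical register offsets, for illustration only. */
#define EXAMPLE_MAC_ADDR_HIGH	0x40
#define EXAMPLE_MAC_ADDR_LOW	0x44

static void example_set_umac_addr(void __iomem *ioaddr,
				  const unsigned char *addr)
{
	u32 hi = (addr[5] << 8) | addr[4];
	u32 lo = (addr[3] << 24) | (addr[2] << 16) |
		 (addr[1] << 8) | addr[0];

	/* The address is only read, so a const pointer is sufficient. */
	writel(hi, ioaddr + EXAMPLE_MAC_ADDR_HIGH);
	writel(lo, ioaddr + EXAMPLE_MAC_ADDR_LOW);
}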
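Separately, the stmmac selftest and cpts hunks swap open-coded kzalloc(n * size, ...) calls for kcalloc()/devm_kcalloc(), which zero the buffer just like kzalloc() but return NULL instead of a short allocation if the element-count multiplication would overflow. A minimal illustration, with a made-up element type:

#include <linux/slab.h>
#include <linux/types.h>

struct example_entry {
	u32 id;
	u32 value;
};

static struct example_entry *example_alloc_table(unsigned int n)
{
	/* Overflow-checked, zeroed allocation of n elements. */
	return kcalloc(n, sizeof(struct example_entry), GFP_KERNEL);
}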