author		Linus Torvalds <torvalds@linux-foundation.org>	2018-04-03 14:04:18 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-03 14:04:18 -0700
commit		5bb053bef82523a8fd78d650bca81c9f114fa276 (patch)
tree		58c2fe47f60bb69230bb05d57a6c9e3f47f7b1fe /drivers/net/ethernet/marvell
parent		bb2407a7219760926760f0448fddf00d625e5aec (diff)
parent		159f02977b2feb18a4bece5e586c838a6d26d44b (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) Support offloading wireless authentication to userspace via
NL80211_CMD_EXTERNAL_AUTH, from Srinivas Dasari.
2) A lot of work on network namespace setup/teardown from Kirill Tkhai.
   Namespace setup and cleanup now run asynchronously, which
   significantly improves performance.
3) Add rx/tx timestamping support to mv88e6xxx driver, from Brandon
Streiff.
4) Support zerocopy on RDS sockets, from Sowmini Varadhan.
5) Use denser instruction encoding in x86 eBPF JIT, from Daniel
Borkmann.
6) Support hw offload of vlan filtering in mvpp2 driver, from Maxime
   Chevallier.
7) Support grafting of child qdiscs in mlxsw driver, from Nogah
Frankel.
8) Add packet forwarding tests to selftests, from Ido Schimmel.
9) Deal with sub-optimal GSO packets better in BBR congestion control,
from Eric Dumazet.
10) Support 5-tuple hashing in ipv6 multipath routing, from David Ahern.
11) Add path MTU tests to selftests, from Stefano Brivio.
12) Various bits of IPsec offloading support for mlx5, from Aviad
    Yehezkel, Yossi Kuperman, and Saeed Mahameed.
13) Support RSS spreading on ntuple filters in SFC driver, from Edward
Cree.
14) Lots of sockmap work from John Fastabend. Applications can use eBPF
    to filter sendmsg and sendpage operations (a minimal sk_msg sketch
    follows this list).
15) In-kernel receive TLS support, from Dave Watson.
16) Add XDP support to ixgbevf; this is significant because it should
    allow optimized XDP usage in various cloud environments. From Tony
    Nguyen.
17) Add new Intel E800 series "ice" ethernet driver, from Anirudh
Venkataramanan et al.
18) IP fragmentation match offload support in nfp driver, from Pieter
Jansen van Vuuren.
19) Support XDP redirect in i40e driver, from Björn Töpel.
20) Add BPF_RAW_TRACEPOINT program type for accessing the arguments of
    tracepoints in their raw form, from Alexei Starovoitov (see the
    raw-tracepoint sketch after this list).
21) Lots of striding RQ improvements to mlx5 driver with many
performance improvements, from Tariq Toukan.
22) Use rhashtable for inet frag reassembly, from Eric Dumazet.
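Two short, editor-added sketches follow to make items 14 and 20 concrete; neither is code from this merge. First, the sendmsg/sendpage filtering that sockmap enables (item 14): a minimal BPF_PROG_TYPE_SK_MSG verdict program. The section name and the toy drop policy are illustrative; sk_msg_md, SK_PASS, and SK_DROP are the kernel's UAPI.

/* Hypothetical example -- not part of this merge. A minimal sk_msg
 * verdict program: once attached to a sockmap, it runs on every
 * sendmsg()/sendpage() on sockets in the map and returns a verdict.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC() macro, as in the kernel's BPF selftests */

SEC("sk_msg")
int msg_verdict(struct sk_msg_md *msg)
{
	void *data = (void *)(long)msg->data;
	void *data_end = (void *)(long)msg->data_end;

	/* Toy policy: drop one-byte messages, pass everything else. */
	if (data + 1 == data_end)
		return SK_DROP;

	return SK_PASS;
}

char _license[] SEC("license") = "GPL";

Such a program is attached against a BPF_MAP_TYPE_SOCKMAP with bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0).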
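Second, the raw tracepoint access item 20 adds: a BPF_PROG_TYPE_RAW_TRACEPOINT program receives the tracepoint's arguments unprocessed in ctx->args[]. The sched_switch argument layout (preempt, prev, next) follows that tracepoint's TP_PROTO; the printed format is illustrative.

/* Hypothetical example -- not part of this merge. */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("raw_tracepoint/sched_switch")
int rawtp_sched_switch(struct bpf_raw_tracepoint_args *ctx)
{
	/* For sched_switch, args are (preempt, prev, next); args[1] is the
	 * outgoing task_struct pointer, with no TP_fast_assign formatting
	 * step in between -- that is the point of BPF_RAW_TRACEPOINT.
	 */
	unsigned long prev = ctx->args[1];
	char fmt[] = "sched_switch prev=%lx\n";

	bpf_trace_printk(fmt, sizeof(fmt), prev);
	return 0;
}

char _license[] SEC("license") = "GPL";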
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1678 commits)
net: mvneta: improve suspend/resume
net: mvneta: split rxq/txq init and txq deinit into SW and HW parts
ipv6: frags: fix /proc/sys/net/ipv6/ip6frag_low_thresh
net: bgmac: Fix endian access in bgmac_dma_tx_ring_free()
net: bgmac: Correctly annotate register space
route: check sysctl_fib_multipath_use_neigh earlier than hash
fix typo in command value in drivers/net/phy/mdio-bitbang.
sky2: Increase D3 delay to sky2 stops working after suspend
net/mlx5e: Set EQE based as default TX interrupt moderation mode
ibmvnic: Disable irqs before exiting reset from closed state
net: sched: do not emit messages while holding spinlock
vlan: also check phy_driver ts_info for vlan's real device
Bluetooth: Mark expected switch fall-throughs
Bluetooth: Set HCI_QUIRK_SIMULTANEOUS_DISCOVERY for BTUSB_QCA_ROME
Bluetooth: btrsi: remove unused including <linux/version.h>
Bluetooth: hci_bcm: Remove DMI quirk for the MINIX Z83-4
sh_eth: kill useless check in __sh_eth_get_regs()
sh_eth: add sh_eth_cpu_data::no_xdfar flag
ipv6: factorize sk_wmem_alloc updates done by __ip6_append_data()
ipv4: factorize sk_wmem_alloc updates done by __ip_append_data()
...
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--	drivers/net/ethernet/marvell/mvneta.c	 188
-rw-r--r--	drivers/net/ethernet/marvell/mvpp2.c	1264
-rw-r--r--	drivers/net/ethernet/marvell/skge.c	   2
-rw-r--r--	drivers/net/ethernet/marvell/sky2.c	   4
4 files changed, 991 insertions(+), 467 deletions(-)
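Editorial note on the mvneta hunks below: the patch splits queue init/deinit into a software half (descriptor-ring allocation) and a hardware half (register programming), so suspend/resume can tear down and reprogram only the hardware state while keeping the rings allocated. Condensed from the diff itself, the resulting shape is:

static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	int ret;

	ret = mvneta_rxq_sw_init(pp, rxq);	/* allocate the descriptor ring */
	if (ret < 0)
		return ret;

	mvneta_rxq_hw_init(pp, rxq);		/* program the queue registers */
	return 0;
}

mvneta_resume() then re-runs only the *_hw_init() halves (and mvneta_suspend() only the hw deinit), which is what makes the suspend/resume path in the first two shortlog entries cheaper.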
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 3f6fb635738c..17a904cc6a5e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1556,7 +1556,6 @@ static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, { mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), value | MVNETA_RXQ_NON_OCCUPIED(0)); - rxq->pkts_coal = value; } /* Set the time delay in usec before RX interrupt will be generated by @@ -1572,7 +1571,6 @@ static void mvneta_rx_time_coal_set(struct mvneta_port *pp, val = (clk_rate / 1000000) * value; mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); - rxq->time_coal = value; } /* Set threshold for TX_DONE pkts coalescing */ @@ -1587,8 +1585,6 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, val |= MVNETA_TXQ_SENT_THRESH_MASK(value); mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); - - txq->done_pkts_coal = value; } /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ @@ -2801,10 +2797,8 @@ static void mvneta_rx_reset(struct mvneta_port *pp) /* Rx/Tx queue initialization/cleanup methods */ -/* Create a specified RX queue */ -static int mvneta_rxq_init(struct mvneta_port *pp, - struct mvneta_rx_queue *rxq) - +static int mvneta_rxq_sw_init(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) { rxq->size = pp->rx_ring_size; @@ -2817,6 +2811,12 @@ static int mvneta_rxq_init(struct mvneta_port *pp, rxq->last_desc = rxq->size - 1; + return 0; +} + +static void mvneta_rxq_hw_init(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ /* Set Rx descriptors queue starting address */ mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); @@ -2840,6 +2840,20 @@ static int mvneta_rxq_init(struct mvneta_port *pp, mvneta_rxq_short_pool_set(pp, rxq); mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); } +} + +/* Create a specified RX queue */ +static int mvneta_rxq_init(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) + +{ + int ret; + + ret = mvneta_rxq_sw_init(pp, rxq); + if (ret < 0) + return ret; + + mvneta_rxq_hw_init(pp, rxq); return 0; } @@ -2862,9 +2876,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, rxq->descs_phys = 0; } -/* Create and initialize a tx queue */ -static int mvneta_txq_init(struct mvneta_port *pp, - struct mvneta_tx_queue *txq) +static int mvneta_txq_sw_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) { int cpu; @@ -2877,7 +2890,6 @@ static int mvneta_txq_init(struct mvneta_port *pp, txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; txq->tx_wake_threshold = txq->tx_stop_threshold / 2; - /* Allocate memory for TX descriptors */ txq->descs = dma_alloc_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, @@ -2887,14 +2899,6 @@ static int mvneta_txq_init(struct mvneta_port *pp, txq->last_desc = txq->size - 1; - /* Set maximum bandwidth for enabled TXQs */ - mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); - mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); - - /* Set Tx descriptors queue starting address */ - mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); - mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); - txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb), GFP_KERNEL); if (!txq->tx_skb) { @@ -2915,7 +2919,6 @@ static int mvneta_txq_init(struct mvneta_port *pp, txq->descs, txq->descs_phys); return -ENOMEM; } - mvneta_tx_done_pkts_coal_set(pp, 
txq, txq->done_pkts_coal); /* Setup XPS mapping */ if (txq_number > 1) @@ -2928,9 +2931,38 @@ static int mvneta_txq_init(struct mvneta_port *pp, return 0; } +static void mvneta_txq_hw_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + /* Set maximum bandwidth for enabled TXQs */ + mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); + mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); + + /* Set Tx descriptors queue starting address */ + mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); + mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); + + mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); +} + +/* Create and initialize a tx queue */ +static int mvneta_txq_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + int ret; + + ret = mvneta_txq_sw_init(pp, txq); + if (ret < 0) + return ret; + + mvneta_txq_hw_init(pp, txq); + + return 0; +} + /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ -static void mvneta_txq_deinit(struct mvneta_port *pp, - struct mvneta_tx_queue *txq) +static void mvneta_txq_sw_deinit(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) { struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); @@ -2951,7 +2983,11 @@ static void mvneta_txq_deinit(struct mvneta_port *pp, txq->last_desc = 0; txq->next_desc_to_proc = 0; txq->descs_phys = 0; +} +static void mvneta_txq_hw_deinit(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ /* Set minimum bandwidth for disabled TXQs */ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); @@ -2961,6 +2997,13 @@ static void mvneta_txq_deinit(struct mvneta_port *pp, mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); } +static void mvneta_txq_deinit(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + mvneta_txq_sw_deinit(pp, txq); + mvneta_txq_hw_deinit(pp, txq); +} + /* Cleanup all Tx queues */ static void mvneta_cleanup_txqs(struct mvneta_port *pp) { @@ -3397,7 +3440,8 @@ static void mvneta_set_eee(struct mvneta_port *pp, bool enable) mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); } -static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode) +static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode, + phy_interface_t interface) { struct mvneta_port *pp = netdev_priv(ndev); u32 val; @@ -3416,6 +3460,7 @@ static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode) } static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode, + phy_interface_t interface, struct phy_device *phy) { struct mvneta_port *pp = netdev_priv(ndev); @@ -4074,22 +4119,6 @@ static int mvneta_ethtool_set_wol(struct net_device *dev, return ret; } -static int mvneta_ethtool_get_module_info(struct net_device *dev, - struct ethtool_modinfo *modinfo) -{ - struct mvneta_port *pp = netdev_priv(dev); - - return phylink_ethtool_get_module_info(pp->phylink, modinfo); -} - -static int mvneta_ethtool_get_module_eeprom(struct net_device *dev, - struct ethtool_eeprom *ee, u8 *buf) -{ - struct mvneta_port *pp = netdev_priv(dev); - - return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf); -} - static int mvneta_ethtool_get_eee(struct net_device *dev, struct ethtool_eee *eee) { @@ -4164,8 +4193,6 @@ static const struct ethtool_ops mvneta_eth_tool_ops = { .set_link_ksettings = mvneta_ethtool_set_link_ksettings, .get_wol = mvneta_ethtool_get_wol, .set_wol = mvneta_ethtool_set_wol, - .get_module_info = 
mvneta_ethtool_get_module_info, - .get_module_eeprom = mvneta_ethtool_get_module_eeprom, .get_eee = mvneta_ethtool_get_eee, .set_eee = mvneta_ethtool_set_eee, }; @@ -4544,16 +4571,45 @@ static int mvneta_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int mvneta_suspend(struct device *device) { + int queue; struct net_device *dev = dev_get_drvdata(device); struct mvneta_port *pp = netdev_priv(dev); + if (!netif_running(dev)) + goto clean_exit; + + if (!pp->neta_armada3700) { + spin_lock(&pp->lock); + pp->is_stopped = true; + spin_unlock(&pp->lock); + + cpuhp_state_remove_instance_nocalls(online_hpstate, + &pp->node_online); + cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); + } + rtnl_lock(); - if (netif_running(dev)) - mvneta_stop(dev); + mvneta_stop_dev(pp); rtnl_unlock(); + + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + + mvneta_rxq_drop_pkts(pp, rxq); + } + + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + + mvneta_txq_hw_deinit(pp, txq); + } + +clean_exit: netif_device_detach(dev); clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); + return 0; } @@ -4562,7 +4618,7 @@ static int mvneta_resume(struct device *device) struct platform_device *pdev = to_platform_device(device); struct net_device *dev = dev_get_drvdata(device); struct mvneta_port *pp = netdev_priv(dev); - int err; + int err, queue; clk_prepare_enable(pp->clk); if (!IS_ERR(pp->clk_bus)) @@ -4584,12 +4640,38 @@ static int mvneta_resume(struct device *device) } netif_device_attach(dev); - rtnl_lock(); - if (netif_running(dev)) { - mvneta_open(dev); - mvneta_set_rx_mode(dev); + + if (!netif_running(dev)) + return 0; + + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + + rxq->next_desc_to_proc = 0; + mvneta_rxq_hw_init(pp, rxq); + } + + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + + txq->next_desc_to_proc = 0; + mvneta_txq_hw_init(pp, txq); + } + + if (!pp->neta_armada3700) { + spin_lock(&pp->lock); + pp->is_stopped = false; + spin_unlock(&pp->lock); + cpuhp_state_add_instance_nocalls(online_hpstate, + &pp->node_online); + cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); } + + rtnl_lock(); + mvneta_start_dev(pp); rtnl_unlock(); + mvneta_set_rx_mode(dev); return 0; } @@ -4656,8 +4738,8 @@ MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); MODULE_LICENSE("GPL"); -module_param(rxq_number, int, S_IRUGO); -module_param(txq_number, int, S_IRUGO); +module_param(rxq_number, int, 0444); +module_param(txq_number, int, 0444); -module_param(rxq_def, int, S_IRUGO); -module_param(rx_copybreak, int, S_IRUGO | S_IWUSR); +module_param(rxq_def, int, 0444); +module_param(rx_copybreak, int, 0644); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 5a1668cdb461..7fc1bbf51c44 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -44,6 +44,7 @@ #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 #define MVPP2_RX_FIFO_INIT_REG 0x64 +#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port)) #define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port)) /* RX DMA Top Registers */ @@ -65,6 +66,10 @@ 
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 #define MVPP2_RXQ_DISABLE_MASK BIT(31) +/* Top Registers */ +#define MVPP2_MH_REG(port) (0x5040 + 4 * (port)) +#define MVPP2_DSA_EXTENDED BIT(5) + /* Parser Registers */ #define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 #define MVPP2_PRS_PORT_LU_MAX 0xf @@ -254,6 +259,7 @@ #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) #define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff +#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8 #define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) #define MVPP2_BM_START_MASK BIT(0) @@ -473,6 +479,7 @@ #define MVPP2_ETH_TYPE_LEN 2 #define MVPP2_PPPOE_HDR_SIZE 8 #define MVPP2_VLAN_TAG_LEN 4 +#define MVPP2_VLAN_TAG_EDSA_LEN 8 /* Lbtd 802.3 type */ #define MVPP2_IP_LBDT_TYPE 0xfffa @@ -536,6 +543,11 @@ /* TX FIFO constants */ #define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa #define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 +#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 +#define MVPP2_TX_FIFO_THRESHOLD_10KB \ + (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) +#define MVPP2_TX_FIFO_THRESHOLD_3KB \ + (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) /* RX buffer constants */ #define MVPP2_SKB_SHINFO_SIZE \ @@ -589,6 +601,9 @@ enum mvpp2_tag_type { #define MVPP2_PRS_TCAM_PROTO_MASK 0xff #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f #define MVPP2_PRS_DBL_VLANS_MAX 100 +#define MVPP2_PRS_CAST_MASK BIT(0) +#define MVPP2_PRS_MCAST_VAL BIT(0) +#define MVPP2_PRS_UCAST_VAL 0x0 /* Tcam structure: * - lookup ID - 4 bits @@ -609,35 +624,81 @@ enum mvpp2_tag_type { #define MVPP2_PRS_TCAM_LU_BYTE 20 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) #define MVPP2_PRS_TCAM_INV_WORD 5 + +#define MVPP2_PRS_VID_TCAM_BYTE 2 + +/* TCAM range for unicast and multicast filtering. We have 25 entries per port, + * with 4 dedicated to UC filtering and the rest to multicast filtering. + * Additionnally we reserve one entry for the broadcast address, and one for + * each port's own address. 
+ */ +#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25 +#define MVPP2_PRS_MAC_RANGE_SIZE 80 + +/* Number of entries per port dedicated to UC and MC filtering */ +#define MVPP2_PRS_MAC_UC_FILT_MAX 4 +#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \ + MVPP2_PRS_MAC_UC_FILT_MAX) + +/* There is a TCAM range reserved for VLAN filtering entries, range size is 33 + * 10 VLAN ID filter entries per port + * 1 default VLAN filter entry per port + * It is assumed that there are 3 ports for filter, not including loopback port + */ +#define MVPP2_PRS_VLAN_FILT_MAX 11 +#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33 + +#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2) +#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1) + /* Tcam entries ID */ #define MVPP2_PE_DROP_ALL 0 #define MVPP2_PE_FIRST_FREE_TID 1 -#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31) + +/* MAC filtering range */ +#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1) +#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \ + MVPP2_PRS_MAC_RANGE_SIZE + 1) +/* VLAN filtering range */ +#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) +#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ + MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) +#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1) #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) -#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29) -#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) -#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27) -#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26) -#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19) -#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) -#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) -#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) -#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) -#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) -#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13) -#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12) -#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11) -#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10) -#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9) -#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8) -#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7) -#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6) -#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5) -#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4) -#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3) -#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) +#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) +#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) +#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27) +#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22) +#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21) +#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20) +#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19) +#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) +#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) +#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) +#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) 
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) +#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13) +#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12) +#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11) +#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10) +#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9) +#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8) +#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7) +#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6) +#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5) +/* reserved */ +#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3) +#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) +#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \ + ((port) * MVPP2_PRS_VLAN_FILT_MAX)) +#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY) +/* Index of default vid filter for given port */ +#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY) + /* Sram structure * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). */ @@ -725,6 +786,7 @@ enum mvpp2_tag_type { #define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) #define MVPP2_PRS_SINGLE_VLAN_AI 0 #define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) +#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0) /* DSA/EDSA type */ #define MVPP2_PRS_TAGGED true @@ -747,6 +809,7 @@ enum mvpp2_prs_lookup { MVPP2_PRS_LU_MAC, MVPP2_PRS_LU_DSA, MVPP2_PRS_LU_VLAN, + MVPP2_PRS_LU_VID, MVPP2_PRS_LU_L2, MVPP2_PRS_LU_PPPOE, MVPP2_PRS_LU_IP4, @@ -755,6 +818,12 @@ enum mvpp2_prs_lookup { MVPP2_PRS_LU_LAST, }; +/* L2 cast enum */ +enum mvpp2_prs_l2_cast { + MVPP2_PRS_L2_UNI_CAST, + MVPP2_PRS_L2_MULTI_CAST, +}; + /* L3 cast enum */ enum mvpp2_prs_l3_cast { MVPP2_PRS_L3_UNI_CAST, @@ -772,23 +841,26 @@ enum mvpp2_prs_l3_cast { #define MVPP22_RSS_TABLE_ENTRIES 32 /* BM constants */ -#define MVPP2_BM_POOLS_NUM 8 +#define MVPP2_BM_JUMBO_BUF_NUM 512 #define MVPP2_BM_LONG_BUF_NUM 1024 #define MVPP2_BM_SHORT_BUF_NUM 2048 #define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) #define MVPP2_BM_POOL_PTR_ALIGN 128 -#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 
2 : port) -#define MVPP2_BM_SWF_SHORT_POOL 3 /* BM cookie (32 bits) definition */ #define MVPP2_BM_COOKIE_POOL_OFFS 8 #define MVPP2_BM_COOKIE_CPU_OFFS 24 +#define MVPP2_BM_SHORT_FRAME_SIZE 512 +#define MVPP2_BM_LONG_FRAME_SIZE 2048 +#define MVPP2_BM_JUMBO_FRAME_SIZE 10240 /* BM short pool packet size * These value assure that for SWF the total number * of bytes allocated for each buffer will be 512 */ -#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512) +#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE) +#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE) +#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE) #define MVPP21_ADDR_SPACE_SZ 0 #define MVPP22_ADDR_SPACE_SZ SZ_64K @@ -796,12 +868,18 @@ enum mvpp2_prs_l3_cast { #define MVPP2_MAX_THREADS 8 #define MVPP2_MAX_QVECS MVPP2_MAX_THREADS -enum mvpp2_bm_type { - MVPP2_BM_FREE, - MVPP2_BM_SWF_LONG, - MVPP2_BM_SWF_SHORT +enum mvpp2_bm_pool_log_num { + MVPP2_BM_SHORT, + MVPP2_BM_LONG, + MVPP2_BM_JUMBO, + MVPP2_BM_POOLS_NUM }; +static struct { + int pkt_size; + int buf_num; +} mvpp2_pools[MVPP2_BM_POOLS_NUM]; + /* GMAC MIB Counters register definitions */ #define MVPP21_MIB_COUNTERS_OFFSET 0x1000 #define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 @@ -1230,7 +1308,6 @@ struct mvpp2_cls_lookup_entry { struct mvpp2_bm_pool { /* Pool number in the range 0-7 */ int id; - enum mvpp2_bm_type type; /* Buffer Pointers Pool External (BPPE) size */ int size; @@ -1282,6 +1359,10 @@ static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) return readl(priv->swth_base[0] + offset); } +static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) +{ + return readl_relaxed(priv->swth_base[0] + offset); +} /* These accessors should be used to access: * * - per-CPU registers, where each CPU has its own copy of the @@ -1330,6 +1411,18 @@ static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, return readl(priv->swth_base[cpu] + offset); } +static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, + u32 offset, u32 data) +{ + writel_relaxed(data, priv->swth_base[cpu] + offset); +} + +static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, + u32 offset) +{ + return readl_relaxed(priv->swth_base[cpu] + offset); +} + static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { @@ -1505,14 +1598,18 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) return 0; } -/* Read tcam entry from hw */ -static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) +/* Initialize tcam entry from hw */ +static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) { int i; if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; + memset(pe, 0, sizeof(*pe)); + pe->index = tid; + /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); @@ -1662,6 +1759,14 @@ static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); } +/* Set vid in tcam sw entry */ +static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, + unsigned short vid) +{ + mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf); + mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff); +} + /* Set bits in sram sw entry */ static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, int val) @@ -1828,16 +1933,11 @@ static void 
mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, } /* Find parser flow entry */ -static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) +static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return NULL; - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); - /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { u8 bits; @@ -1846,17 +1946,15 @@ static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue; - pe->index = tid; - mvpp2_prs_hw_read(priv, pe); - bits = mvpp2_prs_sram_ai_get(pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); + bits = mvpp2_prs_sram_ai_get(&pe); /* Sram store classification lookup ID in AI bits [5:0] */ if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) - return pe; + return tid; } - kfree(pe); - return NULL; + return -ENOENT; } /* Return first free tcam index, seeking from start to end */ @@ -1886,8 +1984,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { /* Entry exist - update port only */ - pe.index = MVPP2_PE_DROP_ALL; - mvpp2_prs_hw_read(priv, &pe); + mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -1914,78 +2011,42 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) mvpp2_prs_hw_write(priv, &pe); } -/* Set port to promiscuous mode */ -static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add) +/* Set port to unicast or multicast promiscuous mode */ +static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) { struct mvpp2_prs_entry pe; + unsigned char cast_match; + unsigned int ri; + int tid; - /* Promiscuous mode - Accept unknown packets */ - - if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) { - /* Entry exist - update port only */ - pe.index = MVPP2_PE_MAC_PROMISCUOUS; - mvpp2_prs_hw_read(priv, &pe); + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { + cast_match = MVPP2_PRS_UCAST_VAL; + tid = MVPP2_PE_MAC_UC_PROMISCUOUS; + ri = MVPP2_PRS_RI_L2_UCAST; } else { - /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - pe.index = MVPP2_PE_MAC_PROMISCUOUS; - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); - - /* Set result info bits */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST, - MVPP2_PRS_RI_L2_CAST_MASK); - - /* Shift to ethertype */ - mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + cast_match = MVPP2_PRS_MCAST_VAL; + tid = MVPP2_PE_MAC_MC_PROMISCUOUS; + ri = MVPP2_PRS_RI_L2_MCAST; } - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Accept multicast */ -static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index, - bool add) -{ - struct mvpp2_prs_entry pe; - unsigned char da_mc; - - /* Ethernet multicast address first byte is - * 0x01 for IPv4 and 0x33 for IPv6 - */ - da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 
0x01 : 0x33; - - if (priv->prs_shadow[index].valid) { - /* Entry exist - update port only */ - pe.index = index; - mvpp2_prs_hw_read(priv, &pe); + /* promiscuous mode - Accept unknown unicast or multicast packets */ + if (priv->prs_shadow[tid].valid) { + mvpp2_prs_init_from_hw(priv, &pe, tid); } else { - /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - pe.index = index; + pe.index = tid; /* Continue - set next lookup */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); /* Set result info bits */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST, - MVPP2_PRS_RI_L2_CAST_MASK); + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK); - /* Update tcam entry data first byte */ - mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff); + /* Match UC or MC addresses */ + mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match, + MVPP2_PRS_CAST_MASK); /* Shift to ethertype */ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, @@ -2021,32 +2082,37 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - pe.index = tid; - mvpp2_prs_hw_read(priv, &pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); pe.index = tid; - /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/ - mvpp2_prs_sram_shift_set(&pe, shift, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); if (tagged) { /* Set tagged bit in DSA tag */ mvpp2_prs_tcam_data_byte_set(&pe, 0, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT); - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, - MVPP2_PRS_SRAM_AI_MASK); - /* If packet is tagged continue check vlans */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + MVPP2_PRS_TCAM_DSA_TAGGED_BIT, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT); + + /* Set ai bits for next iteration */ + if (extend) + mvpp2_prs_sram_ai_update(&pe, 1, + MVPP2_PRS_SRAM_AI_MASK); + else + mvpp2_prs_sram_ai_update(&pe, 0, + MVPP2_PRS_SRAM_AI_MASK); + + /* If packet is tagged continue check vid filtering */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); } else { + /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/ + mvpp2_prs_sram_shift_set(&pe, shift, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set result info bits to 'no vlans' */ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); @@ -2084,8 +2150,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - pe.index = tid; - mvpp2_prs_hw_read(priv, &pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -2133,17 +2198,11 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, } /* Search for existing single/triple vlan entry */ -static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv, - unsigned short tpid, int ai) +static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return NULL; - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); - /* Go through the all entries with MVPP2_PRS_LU_VLAN */ for (tid = MVPP2_PE_FIRST_FREE_TID; tid 
<= MVPP2_PE_LAST_FREE_TID; tid++) { @@ -2154,19 +2213,17 @@ static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - pe->index = tid; - - mvpp2_prs_hw_read(priv, pe); - match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid)); + mvpp2_prs_init_from_hw(priv, &pe, tid); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); if (!match) continue; /* Get vlan type */ - ri_bits = mvpp2_prs_sram_ri_get(pe); + ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; /* Get current ai value from tcam */ - ai_bits = mvpp2_prs_tcam_ai_get(pe); + ai_bits = mvpp2_prs_tcam_ai_get(&pe); /* Clear double vlan bit */ ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; @@ -2175,34 +2232,31 @@ static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv, if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) - return pe; + return tid; } - kfree(pe); - return NULL; + return -ENOENT; } /* Add/update single/triple vlan entry */ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, unsigned int port_map) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid_aux, tid; int ret = 0; - pe = mvpp2_prs_vlan_find(priv, tpid, ai); + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_vlan_find(priv, tpid, ai); - if (!pe) { + if (tid < 0) { /* Create new tcam entry */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); if (tid < 0) return tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return -ENOMEM; - /* Get last double vlan tid */ for (tid_aux = MVPP2_PE_LAST_FREE_TID; tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { @@ -2212,50 +2266,46 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - pe->index = tid_aux; - mvpp2_prs_hw_read(priv, pe); - ri_bits = mvpp2_prs_sram_ri_get(pe); + mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + ri_bits = mvpp2_prs_sram_ri_get(&pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) break; } - if (tid <= tid_aux) { - ret = -EINVAL; - goto free_pe; - } + if (tid <= tid_aux) + return -EINVAL; - memset(pe, 0, sizeof(*pe)); - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); - pe->index = tid; + memset(&pe, 0, sizeof(pe)); + pe.index = tid; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - mvpp2_prs_match_etype(pe, 0, tpid); + mvpp2_prs_match_etype(&pe, 0, tpid); + + /* VLAN tag detected, proceed with VID filtering */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2); - /* Shift 4 bytes - skip 1 vlan tag */ - mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK); + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { - mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE, + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, MVPP2_PRS_RI_VLAN_MASK); } else { ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; - mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE, + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE, MVPP2_PRS_RI_VLAN_MASK); } - mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK); + mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK); - mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } 
else { + mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ - mvpp2_prs_tcam_port_map_set(pe, port_map); + mvpp2_prs_tcam_port_map_set(&pe, port_map); - mvpp2_prs_hw_write(priv, pe); -free_pe: - kfree(pe); + mvpp2_prs_hw_write(priv, &pe); return ret; } @@ -2274,18 +2324,12 @@ static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) } /* Search for existing double vlan entry */ -static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv, - unsigned short tpid1, - unsigned short tpid2) +static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, + unsigned short tpid2) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return NULL; - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); - /* Go through the all entries with MVPP2_PRS_LU_VLAN */ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) { @@ -2296,22 +2340,20 @@ static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - pe->index = tid; - mvpp2_prs_hw_read(priv, pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) - && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2)); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && + mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); if (!match) continue; - ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK; + ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK; if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) - return pe; + return tid; } - kfree(pe); - return NULL; + return -ENOENT; } /* Add or update double vlan entry */ @@ -2319,28 +2361,24 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, unsigned short tpid2, unsigned int port_map) { - struct mvpp2_prs_entry *pe; int tid_aux, tid, ai, ret = 0; + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); - pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); + tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); - if (!pe) { + if (tid < 0) { /* Create new tcam entry */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return -ENOMEM; - /* Set ai value for new double vlan entry */ ai = mvpp2_prs_double_vlan_ai_free_get(priv); - if (ai < 0) { - ret = ai; - goto free_pe; - } + if (ai < 0) + return ai; /* Get first single/triple vlan tid */ for (tid_aux = MVPP2_PE_FIRST_FREE_TID; @@ -2351,46 +2389,44 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - pe->index = tid_aux; - mvpp2_prs_hw_read(priv, pe); - ri_bits = mvpp2_prs_sram_ri_get(pe); + mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) break; } - if (tid >= tid_aux) { - ret = -ERANGE; - goto free_pe; - } + if (tid >= tid_aux) + return -ERANGE; - memset(pe, 0, sizeof(*pe)); - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); - pe->index = tid; + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = tid; priv->prs_double_vlans[ai] = true; - mvpp2_prs_match_etype(pe, 0, tpid1); - mvpp2_prs_match_etype(pe, 4, tpid2); + mvpp2_prs_match_etype(&pe, 0, tpid1); + mvpp2_prs_match_etype(&pe, 4, 
tpid2); - mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN); - /* Shift 8 bytes - skip 2 vlan tags */ - mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN, + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + /* Shift 4 bytes - skip outer vlan tag */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE, + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, + mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, MVPP2_PRS_SRAM_AI_MASK); - mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ - mvpp2_prs_tcam_port_map_set(pe, port_map); - mvpp2_prs_hw_write(priv, pe); -free_pe: - kfree(pe); + mvpp2_prs_tcam_port_map_set(&pe, port_map); + mvpp2_prs_hw_write(priv, &pe); + return ret; } @@ -2694,11 +2730,10 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); mvpp2_prs_hw_write(priv, &pe); - /* place holders only - no ports */ + /* Create dummy entries for drop all and promiscuous modes */ mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, false); - mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false); - mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); } /* Set default entries for various types of dsa packets */ @@ -2755,6 +2790,62 @@ static void mvpp2_prs_dsa_init(struct mvpp2 *priv) mvpp2_prs_hw_write(priv, &pe); } +/* Initialize parser entries for VID filtering */ +static void mvpp2_prs_vid_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 4 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vid entry for extended DSA*/ + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT, + MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + /* Match basic ethertypes */ static int 
mvpp2_prs_etype_init(struct mvpp2 *priv) { @@ -3023,7 +3114,8 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); pe.index = MVPP2_PE_VLAN_DBL; - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + /* Clear ai for next iterations */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, @@ -3386,6 +3478,192 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv) return 0; } +/* Find tcam entry with matched pair <vid,port> */ +static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, + u16 mask) +{ + unsigned char byte[2], enable[2]; + struct mvpp2_prs_entry pe; + u16 rvid, rmask; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VID */ + for (tid = MVPP2_PE_VID_FILT_RANGE_START; + tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); + mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); + + rvid = ((byte[0] & 0xf) << 8) + byte[1]; + rmask = ((enable[0] & 0xf) << 8) + enable[1]; + + if (rvid != vid || rmask != mask) + continue; + + return tid; + } + + return -ENOENT; +} + +/* Write parser entry for VID filtering */ +static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) +{ + unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START + + port->id * MVPP2_PRS_VLAN_FILT_MAX; + unsigned int mask = 0xfff, reg_val, shift; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + /* Scan TCAM and see if entry with this <vid,port> already exist */ + tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + /* No such entry */ + if (tid < 0) { + + /* Go through all entries from first to last in vlan range */ + tid = mvpp2_prs_tcam_first_free(priv, vid_start, + vid_start + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY); + + /* There isn't room for a new VID filter */ + if (tid < 0) + return tid; + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + pe.index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + /* Enable the current port */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Set match on VID */ + mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Write parser entry for VID filtering */ +static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) +{ + struct mvpp2 *priv = port->priv; + int tid; + + /* Scan TCAM and see if entry with this <vid,port> already exist */ + tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); + + /* No such entry */ + if (tid < 0) + return; + + 
mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; +} + +/* Remove all existing VID filters on this port */ +static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + int tid; + + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (priv->prs_shadow[tid].valid) + mvpp2_prs_vid_entry_remove(port, tid); + } +} + +/* Remove VID filering entry for this port */ +static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + + /* Invalidate the guard entry */ + mvpp2_prs_hw_inv(priv, tid); + + priv->prs_shadow[tid].valid = false; +} + +/* Add guard entry that drops packets when no VID is matched on this port */ +static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + unsigned int reg_val, shift; + struct mvpp2_prs_entry pe; + + if (priv->prs_shadow[tid].valid) + return; + + memset(&pe, 0, sizeof(pe)); + + pe.index = tid; + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Drop VLAN packets that don't belong to any VIDs on this port */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + /* Parser default initialization */ static int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) @@ -3429,6 +3707,8 @@ static int mvpp2_prs_default_init(struct platform_device *pdev, mvpp2_prs_dsa_init(priv); + mvpp2_prs_vid_init(priv); + err = mvpp2_prs_etype_init(priv); if (err) return err; @@ -3472,21 +3752,16 @@ static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, } /* Find tcam entry with matched pair <MAC DA, port> */ -static struct mvpp2_prs_entry * +static int mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, unsigned char *mask, int udf_type) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid; - pe = kzalloc(sizeof(*pe), GFP_ATOMIC); - if (!pe) - return NULL; - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); - /* Go through the all entires with MVPP2_PRS_LU_MAC */ - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned int entry_pmap; if (!priv->prs_shadow[tid].valid || @@ -3494,110 +3769,103 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, (priv->prs_shadow[tid].udf != udf_type)) continue; - pe->index = tid; - mvpp2_prs_hw_read(priv, pe); - entry_pmap = mvpp2_prs_tcam_port_map_get(pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); + entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); - if 
(mvpp2_prs_mac_range_equals(pe, da, mask) && + if (mvpp2_prs_mac_range_equals(&pe, da, mask) && entry_pmap == pmap) - return pe; + return tid; } - kfree(pe); - return NULL; + return -ENOENT; } /* Update parser's mac da entry */ -static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, - const u8 *da, bool add) +static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, + bool add) { - struct mvpp2_prs_entry *pe; - unsigned int pmap, len, ri; unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct mvpp2 *priv = port->priv; + unsigned int pmap, len, ri; + struct mvpp2_prs_entry pe; int tid; + memset(&pe, 0, sizeof(pe)); + /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ - pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask, - MVPP2_PRS_UDF_MAC_DEF); + tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask, + MVPP2_PRS_UDF_MAC_DEF); /* No such entry */ - if (!pe) { + if (tid < 0) { if (!add) return 0; /* Create new TCAM entry */ - /* Find first range mac entry*/ - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) - if (priv->prs_shadow[tid].valid && - (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) && - (priv->prs_shadow[tid].udf == - MVPP2_PRS_UDF_MAC_RANGE)) - break; - /* Go through the all entries from first to last */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - tid - 1); + tid = mvpp2_prs_tcam_first_free(priv, + MVPP2_PE_MAC_RANGE_START, + MVPP2_PE_MAC_RANGE_END); if (tid < 0) return tid; - pe = kzalloc(sizeof(*pe), GFP_ATOMIC); - if (!pe) - return -ENOMEM; - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); - pe->index = tid; + pe.index = tid; /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(pe, 0); + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); } + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + /* Update port mask */ - mvpp2_prs_tcam_port_set(pe, port, add); + mvpp2_prs_tcam_port_set(&pe, port->id, add); /* Invalidate the entry if no ports are left enabled */ - pmap = mvpp2_prs_tcam_port_map_get(pe); + pmap = mvpp2_prs_tcam_port_map_get(&pe); if (pmap == 0) { - if (add) { - kfree(pe); + if (add) return -EINVAL; - } - mvpp2_prs_hw_inv(priv, pe->index); - priv->prs_shadow[pe->index].valid = false; - kfree(pe); + + mvpp2_prs_hw_inv(priv, pe.index); + priv->prs_shadow[pe.index].valid = false; return 0; } /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); /* Set match on DA */ len = ETH_ALEN; while (len--) - mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff); + mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); /* Set result info bits */ - if (is_broadcast_ether_addr(da)) + if (is_broadcast_ether_addr(da)) { ri = MVPP2_PRS_RI_L2_BCAST; - else if (is_multicast_ether_addr(da)) + } else if (is_multicast_ether_addr(da)) { ri = MVPP2_PRS_RI_L2_MCAST; - else - ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK; + } else { + ri = MVPP2_PRS_RI_L2_UCAST; + + if (ether_addr_equal(da, port->dev->dev_addr)) + ri |= MVPP2_PRS_RI_MAC_ME_MASK; + } - mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | MVPP2_PRS_RI_MAC_ME_MASK); - mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK | + mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK | MVPP2_PRS_RI_MAC_ME_MASK); /* Shift to ethertype */ - mvpp2_prs_sram_shift_set(pe, 2 * 
ETH_ALEN, + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Update shadow table and hw entry */ - priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF; - mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC); - mvpp2_prs_hw_write(priv, pe); - - kfree(pe); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF; + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); return 0; } @@ -3608,13 +3876,12 @@ static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) int err; /* Remove old parser entry */ - err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr, - false); + err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false); if (err) return err; /* Add new parser entry */ - err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true); + err = mvpp2_prs_mac_da_accept(port, da, true); if (err) return err; @@ -3624,14 +3891,15 @@ static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) return 0; } -/* Delete all port's multicast simple (not range) entries */ -static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port) +static void mvpp2_prs_mac_del_all(struct mvpp2_port *port) { + struct mvpp2 *priv = port->priv; struct mvpp2_prs_entry pe; + unsigned long pmap; int index, tid; - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; if (!priv->prs_shadow[tid].valid || @@ -3639,18 +3907,28 @@ static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port) (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) continue; - /* Only simple mac entries */ - pe.index = tid; - mvpp2_prs_hw_read(priv, &pe); + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + /* We only want entries active on this port */ + if (!test_bit(port->id, &pmap)) + continue; /* Read mac addr from entry */ for (index = 0; index < ETH_ALEN; index++) mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], &da_mask[index]); - if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da)) - /* Delete this entry */ - mvpp2_prs_mac_da_accept(priv, port, da, false); + /* Special cases : Don't remove broadcast and port's own + * address + */ + if (is_broadcast_ether_addr(da) || + ether_addr_equal(da, port->dev->dev_addr)) + continue; + + /* Remove entry from TCAM */ + mvpp2_prs_mac_da_accept(port, da, false); } } @@ -3707,13 +3985,15 @@ static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) /* Set prs flow for the port */ static int mvpp2_prs_def_flow(struct mvpp2_port *port) { - struct mvpp2_prs_entry *pe; + struct mvpp2_prs_entry pe; int tid; - pe = mvpp2_prs_flow_find(port->priv, port->id); + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_flow_find(port->priv, port->id); /* Such entry not exist */ - if (!pe) { + if (tid < 0) { /* Go through the all entires from last to first */ tid = mvpp2_prs_tcam_first_free(port->priv, MVPP2_PE_LAST_FREE_TID, @@ -3721,24 +4001,21 @@ static int mvpp2_prs_def_flow(struct mvpp2_port *port) if (tid < 0) return tid; - pe = kzalloc(sizeof(*pe), GFP_KERNEL); - if (!pe) - return -ENOMEM; - - mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); - pe->index = tid; + pe.index = tid; /* Set flow ID*/ - mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + mvpp2_prs_sram_ai_update(&pe, port->id, 
@@ -3707,13 +3985,15 @@ static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
 /* Set prs flow for the port */
 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
 {
-    struct mvpp2_prs_entry *pe;
+    struct mvpp2_prs_entry pe;
     int tid;

-    pe = mvpp2_prs_flow_find(port->priv, port->id);
+    memset(&pe, 0, sizeof(pe));
+
+    tid = mvpp2_prs_flow_find(port->priv, port->id);

     /* Such entry not exist */
-    if (!pe) {
+    if (tid < 0) {
         /* Go through the all entires from last to first */
         tid = mvpp2_prs_tcam_first_free(port->priv,
                                         MVPP2_PE_LAST_FREE_TID,
@@ -3721,24 +4001,21 @@ static int mvpp2_prs_def_flow(struct mvpp2_port *port)
         if (tid < 0)
             return tid;

-        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
-        if (!pe)
-            return -ENOMEM;
-
-        mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
-        pe->index = tid;
+        pe.index = tid;

         /* Set flow ID*/
-        mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
-        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+        mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
+        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

         /* Update shadow table */
-        mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
+        mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
+    } else {
+        mvpp2_prs_init_from_hw(port->priv, &pe, tid);
     }

-    mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
-    mvpp2_prs_hw_write(port->priv, pe);
-    kfree(pe);
+    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+    mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
+    mvpp2_prs_hw_write(port->priv, &pe);

     return 0;
 }
@@ -3901,7 +4178,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
     val |= MVPP2_BM_START_MASK;
     mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

-    bm_pool->type = MVPP2_BM_FREE;
     bm_pool->size = size;
     bm_pool->pkt_size = 0;
     bm_pool->buf_num = 0;
@@ -3954,11 +4230,17 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,

 /* Free all buffers from the pool */
 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
-                               struct mvpp2_bm_pool *bm_pool)
+                               struct mvpp2_bm_pool *bm_pool, int buf_num)
 {
     int i;

-    for (i = 0; i < bm_pool->buf_num; i++) {
+    if (buf_num > bm_pool->buf_num) {
+        WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
+             bm_pool->id, buf_num);
+        buf_num = bm_pool->buf_num;
+    }
+
+    for (i = 0; i < buf_num; i++) {
         dma_addr_t buf_dma_addr;
         phys_addr_t buf_phys_addr;
         void *data;
@@ -3980,16 +4262,39 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
     bm_pool->buf_num -= i;
 }

+/* Check number of buffers in BM pool */
+static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+{
+    int buf_num = 0;
+
+    buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
+               MVPP22_BM_POOL_PTRS_NUM_MASK;
+    buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
+               MVPP2_BM_BPPI_PTR_NUM_MASK;
+
+    /* HW has one buffer ready which is not reflected in the counters */
+    if (buf_num)
+        buf_num += 1;
+
+    return buf_num;
+}
+
 /* Cleanup pool */
 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
                                  struct mvpp2 *priv,
                                  struct mvpp2_bm_pool *bm_pool)
 {
+    int buf_num;
     u32 val;

-    mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
-    if (bm_pool->buf_num) {
-        WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
+    buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
+    mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
+
+    /* Check buffer counters after free */
+    buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
+    if (buf_num) {
+        WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
+             bm_pool->id, bm_pool->buf_num);
         return 0;
     }

@@ -4051,6 +4356,21 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
     return 0;
 }

+static void mvpp2_setup_bm_pool(void)
+{
+    /* Short pool */
+    mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
+    mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
+
+    /* Long pool */
+    mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
+    mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
+
+    /* Jumbo pool */
+    mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
+    mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
+}
+
 /* Attach long pool to rxq */
 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
                                     int lrxq, int long_pool)
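mvpp2_check_hw_buf_num() above makes the buffer accounting explicit: the count is the sum of two hardware counters (what appear to be the pool's pointer counter and the on-chip BPPI pointer counter; the register names are the only documentation in this hunk), plus one buffer the hardware keeps staged that neither counter reflects. mvpp2_bm_pool_destroy() then uses it as a free-exactly-what-the-hardware-reports contract; condensed, with error handling elided:

    int n = mvpp2_check_hw_buf_num(priv, bm_pool);      /* HW-reported count */

    mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, n);   /* return n buffers */
    WARN_ON(mvpp2_check_hw_buf_num(priv, bm_pool));     /* pool should now be empty */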
@@ -4138,8 +4458,8 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
               << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
               MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

-        mvpp2_percpu_write(port->priv, cpu,
-                           MVPP22_BM_ADDR_HIGH_RLS_REG, val);
+        mvpp2_percpu_write_relaxed(port->priv, cpu,
+                                   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
     }

     /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
@@ -4147,10 +4467,10 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
      * descriptor. Instead of storing the virtual address, we
      * store the physical address
      */
-    mvpp2_percpu_write(port->priv, cpu,
-                       MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
-    mvpp2_percpu_write(port->priv, cpu,
-                       MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+    mvpp2_percpu_write_relaxed(port->priv, cpu,
+                               MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
+    mvpp2_percpu_write_relaxed(port->priv, cpu,
+                               MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

     put_cpu();
 }
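The buffer-release fast path above (and several Tx-path readers further down) moves to _relaxed register accessors. On arm64, plain writel() orders the store after prior normal-memory writes so a device never observes a descriptor before its payload; here the registers themselves carry the payload (the two buffer addresses), so that barrier buys nothing. The mvpp2 wrappers are not shown in this hunk, so the following is an assumption about their shape rather than the driver's actual code:

    /* Assumed shape of the per-CPU accessors, following common kernel practice. */
    static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
                                           u32 offset, u32 data)
    {
        writel_relaxed(data, priv->swth_base[cpu] + offset); /* no barrier */
    }

    static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
                                         u32 offset)
    {
        return readl_relaxed(priv->swth_base[cpu] + offset);
    }

Relaxed MMIO accesses to the same peripheral are still observed in program order; only the ordering against normal memory is dropped.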
@@ -4189,13 +4509,11 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
     bm_pool->buf_num += i;

     netdev_dbg(port->dev,
-               "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
-               bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+               "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
                bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

     netdev_dbg(port->dev,
-               "%s pool %d: %d of %d buffers added\n",
-               bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+               "pool %d: %d of %d buffers added\n",
                bm_pool->id, i, buf_num);
     return i;
 }

@@ -4204,25 +4522,20 @@
  * pool pointer on success */
 static struct mvpp2_bm_pool *
-mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
-                  int pkt_size)
+mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
 {
     struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
     int num;

-    if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
-        netdev_err(port->dev, "mixing pool types is forbidden\n");
+    if (pool >= MVPP2_BM_POOLS_NUM) {
+        netdev_err(port->dev, "Invalid pool %d\n", pool);
         return NULL;
     }

-    if (new_pool->type == MVPP2_BM_FREE)
-        new_pool->type = type;
-
     /* Allocate buffers in case BM pool is used as long pool, but packet
      * size doesn't match MTU or BM pool hasn't being used yet
      */
-    if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
-        (new_pool->pkt_size == 0)) {
+    if (new_pool->pkt_size == 0) {
         int pkts_num;

         /* Set default buffer number or free all the buffers in case
@@ -4230,12 +4543,10 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
          */
         pkts_num = new_pool->buf_num;
         if (pkts_num == 0)
-            pkts_num = type == MVPP2_BM_SWF_LONG ?
-                       MVPP2_BM_LONG_BUF_NUM :
-                       MVPP2_BM_SHORT_BUF_NUM;
+            pkts_num = mvpp2_pools[pool].buf_num;
         else
             mvpp2_bm_bufs_free(port->dev->dev.parent,
-                               port->priv, new_pool);
+                               port->priv, new_pool, pkts_num);

         new_pool->pkt_size = pkt_size;
         new_pool->frag_size =
@@ -4261,16 +4572,28 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
 {
     int rxq;
+    enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
+
+    /* If port pkt_size is higher than 1518B:
+     * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
+     * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+     */
+    if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+        long_log_pool = MVPP2_BM_JUMBO;
+        short_log_pool = MVPP2_BM_LONG;
+    } else {
+        long_log_pool = MVPP2_BM_LONG;
+        short_log_pool = MVPP2_BM_SHORT;
+    }

     if (!port->pool_long) {
         port->pool_long =
-            mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
-                              MVPP2_BM_SWF_LONG,
-                              port->pkt_size);
+            mvpp2_bm_pool_use(port, long_log_pool,
+                              mvpp2_pools[long_log_pool].pkt_size);
         if (!port->pool_long)
             return -ENOMEM;

-        port->pool_long->port_map |= (1 << port->id);
+        port->pool_long->port_map |= BIT(port->id);

         for (rxq = 0; rxq < port->nrxqs; rxq++)
             mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -4278,13 +4601,12 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)

     if (!port->pool_short) {
         port->pool_short =
-            mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
-                              MVPP2_BM_SWF_SHORT,
-                              MVPP2_BM_SHORT_PKT_SIZE);
+            mvpp2_bm_pool_use(port, short_log_pool,
+                              mvpp2_pools[short_log_pool].pkt_size);
         if (!port->pool_short)
             return -ENOMEM;

-        port->pool_short->port_map |= (1 << port->id);
+        port->pool_short->port_map |= BIT(port->id);

         for (rxq = 0; rxq < port->nrxqs; rxq++)
             mvpp2_rxq_short_pool_set(port, rxq,
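mvpp2_swf_bm_pool_init() above and mvpp2_bm_update_mtu() below apply the same mapping rule, worth spelling out once. With three global logical pools (short, long, jumbo), a port's packet size selects which two it draws from, with MVPP2_BM_LONG_PKT_SIZE (1518B, per the comment) as the threshold:

    port pkt_size <= 1518B:  HW long pool <- SW long pool,   HW short pool <- SW short pool
    port pkt_size >  1518B:  HW long pool <- SW jumbo pool,  HW short pool <- SW long pool

So raising the MTU past 1518B shifts the port one step up the pool ladder, and mvpp2_bm_update_mtu() below handles detaching from the old pair and re-running the init.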
@@ -4297,30 +4619,49 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
 {
     struct mvpp2_port *port = netdev_priv(dev);
-    struct mvpp2_bm_pool *port_pool = port->pool_long;
-    int num, pkts_num = port_pool->buf_num;
+    enum mvpp2_bm_pool_log_num new_long_pool;
     int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

-    /* Update BM pool with new buffer size */
-    mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
-    if (port_pool->buf_num) {
-        WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
-        return -EIO;
-    }
-
-    port_pool->pkt_size = pkt_size;
-    port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
-        MVPP2_SKB_SHINFO_SIZE;
-    num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
-    if (num != pkts_num) {
-        WARN(1, "pool %d: %d of %d allocated\n",
-             port_pool->id, num, pkts_num);
-        return -EIO;
+    /* If port MTU is higher than 1518B:
+     * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
+     * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+     */
+    if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+        new_long_pool = MVPP2_BM_JUMBO;
+    else
+        new_long_pool = MVPP2_BM_LONG;
+
+    if (new_long_pool != port->pool_long->id) {
+        /* Remove port from old short & long pool */
+        port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
+                                            port->pool_long->pkt_size);
+        port->pool_long->port_map &= ~BIT(port->id);
+        port->pool_long = NULL;
+
+        port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
+                                             port->pool_short->pkt_size);
+        port->pool_short->port_map &= ~BIT(port->id);
+        port->pool_short = NULL;
+
+        port->pkt_size = pkt_size;
+
+        /* Add port to new short & long pool */
+        mvpp2_swf_bm_pool_init(port);
+
+        /* Update L4 checksum when jumbo enable/disable on port */
+        if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+            dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+            dev->hw_features &= ~(NETIF_F_IP_CSUM |
+                                  NETIF_F_IPV6_CSUM);
+        } else {
+            dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+            dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+        }
     }

-    mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
-                              MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
     dev->mtu = mtu;
+    dev->wanted_features = dev->features;
+
+    netdev_update_features(dev);
     return 0;
 }

@@ -5221,7 +5562,8 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
     if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
         /* Update number of occupied aggregated Tx descriptors */
         int cpu = smp_processor_id();
-        u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+        u32 val = mvpp2_read_relaxed(priv,
+                                     MVPP2_AGGR_TXQ_STATUS_REG(cpu));

         aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
     }
@@ -5245,9 +5587,9 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
     int cpu = smp_processor_id();

     val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
-    mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
+    mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);

-    val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
+    val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);

     return val & MVPP2_TXQ_RSVD_RSLT_MASK;
 }
@@ -5352,8 +5694,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
     u32 val;

     /* Reading status reg resets transmitted descriptor counter */
-    val = mvpp2_percpu_read(port->priv, smp_processor_id(),
-                            MVPP2_TXQ_SENT_REG(txq->id));
+    val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
+                                    MVPP2_TXQ_SENT_REG(txq->id));

     return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
         MVPP2_TRANSMITTED_COUNT_OFFSET;
@@ -6719,8 +7061,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
      *
      * Each CPU has its own Rx/Tx cause register
      */
-    cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
-                                    MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+    cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
+                                            MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

     cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
     if (cause_misc) {
@@ -7007,15 +7349,14 @@ static int mvpp2_open(struct net_device *dev)
         0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
     int err;

-    err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
+    err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
     if (err) {
         netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
         return err;
     }
-    err = mvpp2_prs_mac_da_accept(port->priv, port->id,
-                                  dev->dev_addr, true);
+    err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
     if (err) {
-        netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
+        netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
         return err;
     }
     err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
@@ -7129,30 +7470,64 @@ static int mvpp2_stop(struct net_device *dev)
     return 0;
 }

+static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
+                                        struct netdev_hw_addr_list *list)
+{
+    struct netdev_hw_addr *ha;
+    int ret;
+
+    netdev_hw_addr_list_for_each(ha, list) {
+        ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
+static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
+{
+    if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+        mvpp2_prs_vid_enable_filtering(port);
+    else
+        mvpp2_prs_vid_disable_filtering(port);
+
+    mvpp2_prs_mac_promisc_set(port->priv, port->id,
+                              MVPP2_PRS_L2_UNI_CAST, enable);
+
+    mvpp2_prs_mac_promisc_set(port->priv, port->id,
+                              MVPP2_PRS_L2_MULTI_CAST, enable);
+}
+
 static void mvpp2_set_rx_mode(struct net_device *dev)
 {
     struct mvpp2_port *port = netdev_priv(dev);
-    struct mvpp2 *priv = port->priv;
-    struct netdev_hw_addr *ha;
-    int id = port->id;
-    bool allmulti = dev->flags & IFF_ALLMULTI;
-
-retry:
-    mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
-    mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
-    mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
-
-    /* Remove all port->id's mcast enries */
-    mvpp2_prs_mcast_del_all(priv, id);
-
-    if (!allmulti) {
-        netdev_for_each_mc_addr(ha, dev) {
-            if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
-                allmulti = true;
-                goto retry;
-            }
-        }
+
+    /* Clear the whole UC and MC list */
+    mvpp2_prs_mac_del_all(port);
+
+    if (dev->flags & IFF_PROMISC) {
+        mvpp2_set_rx_promisc(port, true);
+        return;
     }
+
+    mvpp2_set_rx_promisc(port, false);
+
+    if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
+        mvpp2_prs_mac_da_accept_list(port, &dev->uc))
+        mvpp2_prs_mac_promisc_set(port->priv, port->id,
+                                  MVPP2_PRS_L2_UNI_CAST, true);
+
+    if (dev->flags & IFF_ALLMULTI) {
+        mvpp2_prs_mac_promisc_set(port->priv, port->id,
+                                  MVPP2_PRS_L2_MULTI_CAST, true);
+        return;
+    }
+
+    if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
+        mvpp2_prs_mac_da_accept_list(port, &dev->mc))
+        mvpp2_prs_mac_promisc_set(port->priv, port->id,
+                                  MVPP2_PRS_L2_MULTI_CAST, true);
 }

 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
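The rewritten mvpp2_set_rx_mode() drops the retry/goto scheme for a straight-line policy: flush the port's UC/MC filter entries, re-add from dev->uc and dev->mc, and fall back to per-class promiscuity whenever a list exceeds the TCAM budget or an add fails. Reduced to one traffic class (a restatement of the code above, not additional driver logic):

    /* Accept the list if it fits in the TCAM; otherwise go unicast-promisc. */
    static void example_apply_uc_list(struct mvpp2_port *port,
                                      struct net_device *dev)
    {
        if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
            mvpp2_prs_mac_da_accept_list(port, &dev->uc))
            mvpp2_prs_mac_promisc_set(port->priv, port->id,
                                      MVPP2_PRS_L2_UNI_CAST, true);
    }

Unlike the old code, a failure no longer restarts the whole programming sequence; it merely widens the filter for the class that overflowed.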
@@ -7292,6 +7667,48 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
     return ret;
 }

+static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+    struct mvpp2_port *port = netdev_priv(dev);
+    int ret;
+
+    ret = mvpp2_prs_vid_entry_add(port, vid);
+    if (ret)
+        netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
+                   MVPP2_PRS_VLAN_FILT_MAX - 1);
+    return ret;
+}
+
+static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+    struct mvpp2_port *port = netdev_priv(dev);
+
+    mvpp2_prs_vid_entry_remove(port, vid);
+    return 0;
+}
+
+static int mvpp2_set_features(struct net_device *dev,
+                              netdev_features_t features)
+{
+    netdev_features_t changed = dev->features ^ features;
+    struct mvpp2_port *port = netdev_priv(dev);
+
+    if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+        if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+            mvpp2_prs_vid_enable_filtering(port);
+        } else {
+            /* Invalidate all registered VID filters for this
+             * port
+             */
+            mvpp2_prs_vid_remove_all(port);
+
+            mvpp2_prs_vid_disable_filtering(port);
+        }
+    }
+
+    return 0;
+}
+
 /* Ethtool methods */

 /* Set interrupt coalescing for ethtools */
@@ -7433,6 +7850,9 @@ static const struct net_device_ops mvpp2_netdev_ops = {
     .ndo_change_mtu     = mvpp2_change_mtu,
     .ndo_get_stats64    = mvpp2_get_stats64,
     .ndo_do_ioctl       = mvpp2_ioctl,
+    .ndo_vlan_rx_add_vid    = mvpp2_vlan_rx_add_vid,
+    .ndo_vlan_rx_kill_vid   = mvpp2_vlan_rx_kill_vid,
+    .ndo_set_features   = mvpp2_set_features,
 };

 static const struct ethtool_ops mvpp2_eth_tool_ops = {
@@ -7943,16 +8363,25 @@ static int mvpp2_port_probe(struct platform_device *pdev,
         }
     }

-    features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+    features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+               NETIF_F_TSO;
     dev->features = features | NETIF_F_RXCSUM;
-    dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
+    dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
+                        NETIF_F_HW_VLAN_CTAG_FILTER;
+
+    if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
+        dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+        dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+    }
+
     dev->vlan_features |= features;
     dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
+    dev->priv_flags |= IFF_UNICAST_FLT;

-    /* MTU range: 68 - 9676 */
+    /* MTU range: 68 - 9704 */
     dev->min_mtu = ETH_MIN_MTU;
-    /* 9676 == 9700 - 20 and rounding to 8 */
-    dev->max_mtu = 9676;
+    /* 9704 == 9728 - 20 and rounding to 8 */
+    dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;

     err = register_netdev(dev);
     if (err < 0) {
@@ -8083,14 +8512,25 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
     mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
 }

-/* Initialize Tx FIFO's */
+/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
+ * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
+ * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB.
+ */
 static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
 {
-    int port;
+    int port, size, thrs;

-    for (port = 0; port < MVPP2_MAX_PORTS; port++)
-        mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port),
-                    MVPP22_TX_FIFO_DATA_SIZE_3KB);
+    for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+        if (port == 0) {
+            size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
+            thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
+        } else {
+            size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
+            thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
+        }
+        mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
+        mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
+    }
 }

 static void mvpp2_axi_init(struct mvpp2 *priv)
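Two numbers in the hunks above are worth checking. The Tx FIFO comment fixes the budget at 19kB with 10kB reserved for the only 10G-capable port; assuming MVPP2_MAX_PORTS is 4 (the constant's value is not visible in this diff), the loop hands out

    10kB (port 0) + 3 * 3kB (ports 1-3) = 19kB

exactly exhausting the FIFO. And the new MTU ceiling follows from the jumbo buffer: 9728 - 20 = 9708, which rounded down to a multiple of 8 gives the 9704 quoted in the new max_mtu comment.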
@@ -8284,6 +8724,8 @@ static int mvpp2_probe(struct platform_device *pdev)
         priv->sysctrl_base = NULL;
     }

+    mvpp2_setup_bm_pool();
+
     for (i = 0; i < MVPP2_MAX_THREADS; i++) {
         u32 addr_space_sz;

diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 31efc47c847e..9c08c3650c02 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3783,7 +3783,7 @@ static int skge_device_event(struct notifier_block *unused,
         break;

     case NETDEV_UP:
-        d = debugfs_create_file(dev->name, S_IRUGO,
+        d = debugfs_create_file(dev->name, 0444,
                                 skge_debug, dev,
                                 &skge_debug_fops);
         if (!d || IS_ERR(d))
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 9fe85300e7b6..697d9b374f5e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4667,7 +4667,7 @@ static int sky2_device_event(struct notifier_block *unused,
         break;

     case NETDEV_UP:
-        sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO,
+        sky2->debugfs = debugfs_create_file(dev->name, 0444,
                                             sky2_debug, dev,
                                             &sky2_debug_fops);
         if (IS_ERR(sky2->debugfs))
@@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     INIT_WORK(&hw->restart_work, sky2_restart);

     pci_set_drvdata(pdev, hw);
-    pdev->d3_delay = 150;
+    pdev->d3_delay = 200;

     return 0;
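The skge and sky2 hunks are mechanical cleanups plus one fix. The debugfs permission change is purely notational, since the octal literal and the macro denote the same mode:

    S_IRUGO = S_IRUSR | S_IRGRP | S_IROTH = 0400 | 0040 | 0004 = 0444

The final sky2 change raises pdev->d3_delay, the delay in milliseconds the PCI core observes when bringing the device from D3hot back to D0, from 150 to 200.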