author	Greg Kroah-Hartman <gregkh@suse.de>	2006-08-18 11:02:52 -0700
committer	Greg Kroah-Hartman <gregkh@suse.de>	2006-08-18 11:02:52 -0700
commit	ef7d1b244fa6c94fb76d5f787b8629df64ea4046 (patch)
tree	eb6b35bb250f46d75853764bd9309c75e4f03c72 /drivers
parent	ed0da6fc9d3096f54c4a76737eeae57ac81418cf (diff)
parent	78eb887733ec8ff5d6e6c69e3c32a187a9303622 (diff)
Merge gregkh@master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/bnx2.c	49
-rw-r--r--	drivers/net/bnx2.h	12
-rw-r--r--	drivers/net/ppp_generic.c	30
3 files changed, 49 insertions(+), 42 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index db73de0d2511..652eb05a6c2d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.4.43"
-#define DRV_MODULE_RELDATE "June 28, 2006"
+#define DRV_MODULE_VERSION "1.4.44"
+#define DRV_MODULE_RELDATE "August 10, 2006"
#define RUN_AT(x) (jiffies + (x))
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
- u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+ u32 diff;
+ smp_mb();
+ diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
if (diff > MAX_TX_DESC_CNT)
diff = (diff & MAX_TX_DESC_CNT) - 1;
return (bp->tx_ring_size - diff);
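The smp_mb() added here pairs with a matching barrier in bnx2_tx_int() further down; the free-space arithmetic itself is unchanged. A minimal user-space sketch of that arithmetic, assuming a 256-entry ring page whose last slot is the chain descriptor (the standalone tx_avail() helper, the constants and the sample indices are illustrative, not copied from bnx2.h):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TX_DESC_CNT     256u                    /* assumed descriptors per page */
    #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
    #define TX_RING_IDX(x)  ((x) & MAX_TX_DESC_CNT)

    static uint32_t tx_avail(uint16_t prod, uint16_t cons, uint32_t ring_size)
    {
            uint32_t diff = TX_RING_IDX(prod) - TX_RING_IDX(cons);

            /* if prod has wrapped past the end of the page the subtraction
             * underflows; mask it back and drop one slot for the chain
             * descriptor that terminates the page */
            if (diff > MAX_TX_DESC_CNT)
                    diff = (diff & MAX_TX_DESC_CNT) - 1;
            return ring_size - diff;
    }

    int main(void)
    {
            assert(tx_avail(10, 10, MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT);
            assert(tx_avail(20, 10, MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT - 10);
            printf("after wrap: %u free\n", (unsigned)tx_avail(3, 250, MAX_TX_DESC_CNT));
            return 0;
    }

With prod at 3 and cons at 250 the indices have wrapped; the mask-and-decrement branch counts 8 in-flight descriptors (the ninth slot is the chain BD), leaving 247 entries free.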
@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
unsigned long align;
- skb = dev_alloc_skb(bp->rx_buf_size);
+ skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
if (skb == NULL) {
return -ENOMEM;
}
@@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
skb_reserve(skb, 8 - align);
}
- skb->dev = bp->dev;
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
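netdev_alloc_skb() combines the allocation with the skb->dev assignment that the hunks above drop from the driver. As a rough illustration of the contract only, not the actual net/core implementation:

    /* hypothetical open-coded equivalent of the helper's visible behaviour */
    static struct sk_buff *netdev_alloc_skb_sketch(struct net_device *dev,
                                                   unsigned int length)
    {
            struct sk_buff *skb = dev_alloc_skb(length);

            if (skb)
                    skb->dev = dev;         /* what the deleted lines did by hand */
            return skb;
    }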
@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
}
bp->tx_cons = sw_cons;
+ /* Need to make the tx_cons update visible to bnx2_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that bnx2_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
- if (unlikely(netif_queue_stopped(bp->dev))) {
- spin_lock(&bp->tx_lock);
+ if (unlikely(netif_queue_stopped(bp->dev)) &&
+ (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+ netif_tx_lock(bp->dev);
if ((netif_queue_stopped(bp->dev)) &&
- (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+ (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
netif_wake_queue(bp->dev);
- }
- spin_unlock(&bp->tx_lock);
+ netif_tx_unlock(bp->dev);
}
}
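The barrier added above pairs with the one now at the top of bnx2_tx_avail(): the consumer must publish tx_cons before testing netif_queue_stopped(), and the producer must publish the stopped state before re-reading tx_cons. A condensed user-space model of that protocol, assuming C11 fences stand in for smp_mb() and a plain flag stands in for the netif queue state (RING_SIZE, WAKE_THRESH and MAX_FRAGS are illustrative values):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE   256u
    #define WAKE_THRESH (RING_SIZE / 2)         /* mirrors tx_wake_thresh */
    #define MAX_FRAGS   18u

    static _Atomic unsigned int tx_prod, tx_cons;
    static atomic_bool queue_stopped;

    static unsigned int tx_avail(void)
    {
            /* counterpart of the smp_mb() in bnx2_tx_avail(): order the
             * queue_stopped store before re-reading tx_cons */
            atomic_thread_fence(memory_order_seq_cst);
            return RING_SIZE - (tx_prod - tx_cons) % RING_SIZE;
    }

    static void start_xmit(void)                /* producer, cf. bnx2_start_xmit() */
    {
            tx_prod++;                          /* queue one descriptor */
            if (tx_avail() <= MAX_FRAGS) {
                    atomic_store(&queue_stopped, true);
                    /* re-check: the consumer may have freed space after the
                     * first test but before the stop became visible */
                    if (tx_avail() > WAKE_THRESH)
                            atomic_store(&queue_stopped, false);
            }
    }

    static void tx_int(void)                    /* consumer, cf. bnx2_tx_int() */
    {
            tx_cons++;                          /* reclaim one descriptor */
            /* counterpart of the smp_mb() added above: publish tx_cons
             * before looking at the stopped flag, or the wake-up is lost */
            atomic_thread_fence(memory_order_seq_cst);
            if (atomic_load(&queue_stopped) && tx_avail() > WAKE_THRESH)
                    atomic_store(&queue_stopped, false);
    }

    int main(void)
    {
            start_xmit();
            tx_int();
            printf("stopped=%d, %u descriptors free\n",
                   (int)atomic_load(&queue_stopped), tx_avail());
            return 0;
    }

Without the two fences each side can read a stale value of the other's variable and both can conclude that no wake-up is needed, which is the stuck-queue case the new comment describes.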
@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
struct sk_buff *new_skb;
- new_skb = dev_alloc_skb(len + 2);
+ new_skb = netdev_alloc_skb(bp->dev, len + 2);
if (new_skb == NULL)
goto reuse_rx;
@@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
skb_reserve(new_skb, 2);
skb_put(new_skb, len);
- new_skb->dev = bp->dev;
bnx2_reuse_rx_skb(bp, skb,
sw_ring_cons, sw_ring_prod);
@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
struct tx_bd *txbd;
u32 val;
+ bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
return -EINVAL;
pkt_size = 1514;
- skb = dev_alloc_skb(pkt_size);
+ skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
packet = skb_put(skb, pkt_size);
@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
#endif
/* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full. This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
*/
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
- spin_lock(&bp->tx_lock);
netif_stop_queue(dev);
-
- if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+ if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
netif_wake_queue(dev);
- spin_unlock(&bp->tx_lock);
}
return NETDEV_TX_OK;
@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->pdev = pdev;
spin_lock_init(&bp->phy_lock);
- spin_lock_init(&bp->tx_lock);
INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->mac_addr[5] = (u8) reg;
bp->tx_ring_size = MAX_TX_DESC_CNT;
- bnx2_set_rx_ring_size(bp, 100);
+ bnx2_set_rx_ring_size(bp, 255);
bp->rx_csum = 1;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 658c5ee95c73..fe804763c607 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3890,10 +3890,6 @@ struct bnx2 {
u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
u16 tx_prod;
- struct tx_bd *tx_desc_ring;
- struct sw_bd *tx_buf_ring;
- int tx_ring_size;
-
u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
u16 hw_tx_cons;
@@ -3916,9 +3912,11 @@ struct bnx2 {
struct sw_bd *rx_buf_ring;
struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
- /* Only used to synchronize netif_stop_queue/wake_queue when tx */
- /* ring is full */
- spinlock_t tx_lock;
+ /* TX constants */
+ struct tx_bd *tx_desc_ring;
+ struct sw_bd *tx_buf_ring;
+ int tx_ring_size;
+ u32 tx_wake_thresh;
/* End of fields used in the performance code paths. */
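tx_desc_ring, tx_buf_ring, tx_ring_size and the new tx_wake_thresh are read-mostly once the ring is set up, so they no longer need to sit next to the hot producer/consumer indices. A trimmed, compilable sketch of the layout idea (64 stands in for L1_CACHE_BYTES, stdint types stand in for u16/u32, and only the relevant fields are shown):

    #include <stdint.h>

    struct tx_bd;                       /* opaque here; defined in bnx2.h */
    struct sw_bd;

    struct bnx2_tx_layout_sketch {
            /* written by the xmit path */
            uint32_t tx_prod_bseq __attribute__((aligned(64)));
            uint16_t tx_prod;

            /* written by the completion path */
            uint16_t tx_cons __attribute__((aligned(64)));
            uint16_t hw_tx_cons;

            /* read-mostly after bnx2_init_tx_ring(); sharing a line is fine */
            struct tx_bd *tx_desc_ring;
            struct sw_bd *tx_buf_ring;
            int tx_ring_size;
            uint32_t tx_wake_thresh;
    };

Keeping the two write-heavy index groups on separate cache lines avoids bouncing a line between the CPU running bnx2_start_xmit() and the one running bnx2_tx_int().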
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0ec6e9d57b94..c872f7c6cce3 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -192,7 +192,7 @@ struct cardmap {
void *ptr[CARDMAP_WIDTH];
};
static void *cardmap_get(struct cardmap *map, unsigned int nr);
-static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
+static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
static unsigned int cardmap_find_first_free(struct cardmap *map);
static void cardmap_destroy(struct cardmap **map);
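cardmap is a small radix tree keyed by unit number, CARDMAP_ORDER bits per level; the signature change lets cardmap_set() report an allocation failure when it has to grow that tree. For orientation, a user-space sketch of the lookup side (the ORDER value and the trimmed struct are assumptions; only the traversal mirrors cardmap_get()):

    #include <stddef.h>

    #define CARDMAP_ORDER 6                         /* assumed bits per level */
    #define CARDMAP_WIDTH (1u << CARDMAP_ORDER)
    #define CARDMAP_MASK  (CARDMAP_WIDTH - 1)

    struct cardmap {
            int shift;                              /* 0 at the leaf level */
            void *ptr[CARDMAP_WIDTH];
    };

    static void *cardmap_get_sketch(struct cardmap *map, unsigned int nr)
    {
            struct cardmap *p = map;

            while (p != NULL) {
                    if ((nr >> p->shift) >= CARDMAP_WIDTH)
                            return NULL;            /* nr is outside the tree */
                    if (p->shift == 0)
                            return p->ptr[nr & CARDMAP_MASK];
                    /* descend: pick the child indexed by this level's bits */
                    p = p->ptr[(nr >> p->shift) & CARDMAP_MASK];
            }
            return NULL;
    }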
@@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan)
{
struct channel *pch;
- pch = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (pch == 0)
return -ENOMEM;
- memset(pch, 0, sizeof(struct channel));
pch->ppp = NULL;
pch->chan = chan;
chan->ppp = pch;
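kzalloc() folds the kmalloc()/memset() pair into one call, which is why the explicit memset() that followed each allocation can go here and in the later hunks. As an open-coded, kernel-style sketch of its behaviour (not the slab implementation itself):

    #include <linux/slab.h>
    #include <linux/string.h>

    /* illustrative only: allocate and zero, as the replaced pair did */
    static inline void *kzalloc_sketch(size_t size, gfp_t flags)
    {
            void *p = kmalloc(size, flags);

            if (p)
                    memset(p, 0, size);
            return p;
    }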
@@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp)
int ret = -ENOMEM;
int i;
- ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+ ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
if (!ppp)
goto out;
dev = alloc_netdev(0, "", ppp_setup);
if (!dev)
goto out1;
- memset(ppp, 0, sizeof(struct ppp));
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
@@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp)
}
atomic_inc(&ppp_unit_count);
- cardmap_set(&all_ppp_units, unit, ppp);
+ ret = cardmap_set(&all_ppp_units, unit, ppp);
+ if (ret != 0)
+ goto out3;
+
mutex_unlock(&all_ppp_mutex);
*retp = 0;
return ppp;
+out3:
+ atomic_dec(&ppp_unit_count);
out2:
mutex_unlock(&all_ppp_mutex);
free_netdev(dev);
@@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr)
return NULL;
}
-static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
+static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
{
struct cardmap *p;
int i;
@@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
do {
/* need a new top level */
- struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
- memset(np, 0, sizeof(*np));
+ struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ goto enomem;
np->ptr[0] = p;
if (p != NULL) {
np->shift = p->shift + CARDMAP_ORDER;
@@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
while (p->shift > 0) {
i = (nr >> p->shift) & CARDMAP_MASK;
if (p->ptr[i] == NULL) {
- struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
- memset(np, 0, sizeof(*np));
+ struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ goto enomem;
np->shift = p->shift - CARDMAP_ORDER;
np->parent = p;
p->ptr[i] = np;
@@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
set_bit(i, &p->inuse);
else
clear_bit(i, &p->inuse);
+ return 0;
+ enomem:
+ return -ENOMEM;
}
static unsigned int cardmap_find_first_free(struct cardmap *map)