author    David S. Miller <davem@davemloft.net>  2014-03-29 18:48:54 -0400
committer David S. Miller <davem@davemloft.net>  2014-03-29 18:48:54 -0400
commit    64c27237a07129758e33f5f824ba5c33b7f57417 (patch)
tree      4c0c0a9b6d282d600f2226e1b3510096b9d789dd
parent    77a9939426f7a3f35f460afc9b11f1fe45955409 (diff)
parent    49d8137a4039c63c834827f4bfe875e27bb9c521 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/marvell/mvneta.c

The mvneta.c conflict is a case of overlapping changes, a conversion
to devm_ioremap_resource() vs. a conversion to netdev_alloc_pcpu_stats.

Signed-off-by: David S. Miller <davem@davemloft.net>
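For readers unfamiliar with the two APIs named in the conflict, the sketch below shows how the two independent probe-time conversions can coexist after the merge. It is illustrative only and not the resolved mvneta.c; the function name mvneta_probe_sketch and the exact field layout are assumptions based on the driver.

/* Illustrative sketch only -- assumes mvneta's probe-time structure. */
static int mvneta_probe_sketch(struct platform_device *pdev,
			       struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct resource *res;

	/* Side A of the conflict: devm-managed MMIO mapping. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base))
		return PTR_ERR(pp->base);

	/* Side B of the conflict: per-CPU stats allocation helper. */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats)
		return -ENOMEM;

	return 0;
}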
-rw-r--r--  MAINTAINERS                                    |   5
-rw-r--r--  arch/x86/include/asm/pgtable.h                 |  14
-rw-r--r--  arch/x86/include/asm/topology.h                |   3
-rw-r--r--  arch/x86/xen/mmu.c                             |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c            |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c          |  14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c            |  17
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c                  |  11
-rw-r--r--  drivers/i2c/busses/i2c-cpm.c                   |   2
-rw-r--r--  drivers/input/keyboard/adp5588-keys.c          |  12
-rw-r--r--  drivers/input/misc/da9052_onkey.c              |  29
-rw-r--r--  drivers/input/mouse/cypress_ps2.c              |   1
-rw-r--r--  drivers/input/mouse/synaptics.c                |  55
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c            |   5
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c          |  43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c      |   6
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c   |   4
-rw-r--r--  drivers/net/ifb.c                              |   3
-rw-r--r--  drivers/net/usb/usbnet.c                       |  33
-rw-r--r--  drivers/net/veth.c                             |   5
-rw-r--r--  drivers/net/virtio_net.c                       |   6
-rw-r--r--  drivers/vhost/net.c                            |  20
-rw-r--r--  drivers/xen/balloon.c                          |  24
-rw-r--r--  fs/anon_inodes.c                               |  34
-rw-r--r--  fs/ocfs2/stackglue.c                           |   4
-rw-r--r--  include/linux/netdev_features.h                |   7
-rw-r--r--  include/linux/netdevice.h                      |   2
-rw-r--r--  include/linux/skbuff.h                         |   4
-rw-r--r--  include/linux/usb/usbnet.h                     |   2
-rw-r--r--  include/net/if_inet6.h                         |   4
-rw-r--r--  kernel/trace/trace.c                           |  27
-rw-r--r--  lib/random32.c                                 |  13
-rw-r--r--  net/8021q/vlan.c                               |   4
-rw-r--r--  net/8021q/vlan_dev.c                           |   6
-rw-r--r--  net/bridge/br_device.c                         |   6
-rw-r--r--  net/bridge/br_input.c                          |  11
-rw-r--r--  net/bridge/br_vlan.c                           |  44
-rw-r--r--  net/core/dev.c                                 |  13
-rw-r--r--  net/core/skbuff.c                              |  30
-rw-r--r--  net/ipv4/gre_demux.c                           |   8
-rw-r--r--  net/ipv4/ip_tunnel.c                           |   3
-rw-r--r--  net/ipv4/ip_tunnel_core.c                      |   1
-rw-r--r--  net/ipv4/tcp_ipv4.c                            |   2
-rw-r--r--  net/ipv6/addrconf.c                            | 193
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c           |   9
-rw-r--r--  net/openvswitch/datapath.c                     |   6
-rw-r--r--  net/openvswitch/flow.c                         |  26
-rw-r--r--  net/unix/af_unix.c                             |  17
48 files changed, 527 insertions, 267 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index f779c91122bc..162b0fe3b001 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1839,8 +1839,8 @@ F: net/bluetooth/
F: include/net/bluetooth/
BONDING DRIVER
-M: Jay Vosburgh <fubar@us.ibm.com>
-M: Veaceslav Falico <vfalico@redhat.com>
+M: Jay Vosburgh <j.vosburgh@gmail.com>
+M: Veaceslav Falico <vfalico@gmail.com>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
@@ -6019,6 +6019,7 @@ F: include/uapi/linux/net.h
F: include/uapi/linux/netdevice.h
F: tools/net/
F: tools/testing/selftests/net/
+F: lib/random32.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5ad38ad07890..bbc8b12fa443 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,20 +445,10 @@ static inline int pte_same(pte_t a, pte_t b)
return a.pte == b.pte;
}
-static inline int pteval_present(pteval_t pteval)
-{
- /*
- * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
- * way clearly states that the intent is that protnone and numa
- * hinting ptes are considered present for the purposes of
- * pagetable operations like zapping, protection changes, gup etc.
- */
- return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
-}
-
static inline int pte_present(pte_t a)
{
- return pteval_present(pte_flags(a));
+ return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+ _PAGE_NUMA);
}
#define pte_accessible pte_accessible
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index d35f24e231cd..1306d117967d 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { }
extern const struct cpumask *cpu_coregroup_mask(int cpu);
-#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+
+#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#endif
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 256282e7888b..2423ef04ffea 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
- if (pteval_present(val)) {
+ if (val & _PAGE_PRESENT) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
- if (pteval_present(val)) {
+ if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 40a2b36b276b..d278be110805 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -842,7 +842,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE,
- false);
+ true);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 89c484d8ac26..4ee702ac8907 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -866,13 +866,16 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (nouveau_runtime_pm == 0)
- return -EINVAL;
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
- return -EINVAL;
+ pm_runtime_forbid(dev);
+ return -EBUSY;
}
nv_debug_level(SILENT);
@@ -923,12 +926,15 @@ static int nouveau_pmops_runtime_idle(struct device *dev)
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_crtc *crtc;
- if (nouveau_runtime_pm == 0)
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
return -EBUSY;
+ }
/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ pm_runtime_forbid(dev);
return -EBUSY;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 84a1bbb75f91..f633c2782170 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -403,11 +403,15 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (radeon_runtime_pm == 0)
- return -EINVAL;
+ if (radeon_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
- if (radeon_runtime_pm == -1 && !radeon_is_px())
- return -EINVAL;
+ if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
@@ -456,12 +460,15 @@ static int radeon_pmops_runtime_idle(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_crtc *crtc;
- if (radeon_runtime_pm == 0)
+ if (radeon_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
return -EBUSY;
+ }
/* are we PX enabled? */
if (radeon_runtime_pm == -1 && !radeon_is_px()) {
DRM_DEBUG_DRIVER("failing to power off - not px\n");
+ pm_runtime_forbid(dev);
return -EBUSY;
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8d67b943ac05..0394811251bd 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -177,8 +177,10 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->vmapping)
udl_gem_vunmap(obj);
- if (gem_obj->import_attach)
+ if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, obj->sg);
+ put_device(gem_obj->dev->dev);
+ }
if (obj->pages)
udl_gem_put_pages(obj);
@@ -256,9 +258,12 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
int ret;
/* need to attach */
+ get_device(dev->dev);
attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
+ if (IS_ERR(attach)) {
+ put_device(dev->dev);
return ERR_CAST(attach);
+ }
get_dma_buf(dma_buf);
@@ -282,6 +287,6 @@ fail_unmap:
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
-
+ put_device(dev->dev);
return ERR_PTR(ret);
}
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index be7f0a20d634..f3b89a4698b6 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -39,7 +39,9 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index bb3b57bea8ba..5ef7fcf0e250 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -76,8 +76,18 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
+ int val;
- return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
+ mutex_lock(&kpad->gpio_lock);
+
+ if (kpad->dir[bank] & bit)
+ val = kpad->dat_out[bank];
+ else
+ val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
+
+ mutex_unlock(&kpad->gpio_lock);
+
+ return !!(val & bit);
}
static void adp5588_gpio_set_value(struct gpio_chip *chip,
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 1f695f229ea8..184c8f21ab59 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -27,29 +27,32 @@ struct da9052_onkey {
static void da9052_onkey_query(struct da9052_onkey *onkey)
{
- int key_stat;
+ int ret;
- key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG);
- if (key_stat < 0) {
+ ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
+ if (ret < 0) {
dev_err(onkey->da9052->dev,
- "Failed to read onkey event %d\n", key_stat);
+ "Failed to read onkey event err=%d\n", ret);
} else {
/*
* Since interrupt for deassertion of ONKEY pin is not
* generated, onkey event state determines the onkey
* button state.
*/
- key_stat &= DA9052_EVENTB_ENONKEY;
- input_report_key(onkey->input, KEY_POWER, key_stat);
+ bool pressed = !(ret & DA9052_STATUSA_NONKEY);
+
+ input_report_key(onkey->input, KEY_POWER, pressed);
input_sync(onkey->input);
- }
- /*
- * Interrupt is generated only when the ONKEY pin is asserted.
- * Hence the deassertion of the pin is simulated through work queue.
- */
- if (key_stat)
- schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
+ /*
+ * Interrupt is generated only when the ONKEY pin
+ * is asserted. Hence the deassertion of the pin
+ * is simulated through work queue.
+ */
+ if (pressed)
+ schedule_delayed_work(&onkey->work,
+ msecs_to_jiffies(50));
+ }
}
static void da9052_onkey_work(struct work_struct *work)
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 87095e2f5153..8af34ffe208b 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -409,7 +409,6 @@ static int cypress_set_input_params(struct input_dev *input,
__clear_bit(REL_X, input->relbit);
__clear_bit(REL_Y, input->relbit);
- __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 26386f9d2569..d8d49d10f9bb 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse)
* Read touchpad resolution and maximum reported coordinates
* Resolution is left zero if touchpad does not support the query
*/
+
+static const int *quirk_min_max;
+
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char resp[3];
+ if (quirk_min_max) {
+ priv->x_min = quirk_min_max[0];
+ priv->x_max = quirk_min_max[1];
+ priv->y_min = quirk_min_max[2];
+ priv->y_max = quirk_min_max[3];
+ return 0;
+ }
+
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
@@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
{ }
};
+static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+#if defined(CONFIG_DMI)
+ {
+ /* Lenovo ThinkPad Helix */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+ },
+ .driver_data = (int []){1024, 5052, 2258, 4832},
+ },
+ {
+ /* Lenovo ThinkPad X240 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
+ },
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
+ /* Lenovo ThinkPad T440s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
+ /* Lenovo ThinkPad T540p */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
+ },
+ .driver_data = (int []){1024, 5056, 2058, 4832},
+ },
+#endif
+ { }
+};
+
void __init synaptics_module_init(void)
{
+ const struct dmi_system_id *min_max_dmi;
+
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+
+ min_max_dmi = dmi_first_match(min_max_dmi_table);
+ if (min_max_dmi)
+ quirk_min_max = min_max_dmi->driver_data;
}
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 22586e92da0e..b9f7022f4e81 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17641,8 +17641,6 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_init_bufmgr_config(tp);
- features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
@@ -17674,7 +17672,8 @@ static int tg3_init_one(struct pci_dev *pdev,
features |= NETIF_F_TSO_ECN;
}
- dev->features |= features;
+ dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features |= features;
/*
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bcce0b437722..d04b1c3c9b85 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -89,8 +89,9 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
-#define MVNETA_SGMII_SERDES_CFG 0x24A0
+#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
+#define MVNETA_RGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -162,7 +163,7 @@
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
-#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
+#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
@@ -711,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
-
-
-/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- if (enable)
- val |= MVNETA_GMAC2_PORT_RGMII;
- else
- val &= ~MVNETA_GMAC2_PORT_RGMII;
-
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-}
-
-/* Config SGMII port */
-static void mvneta_port_sgmii_config(struct mvneta_port *pp)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val |= MVNETA_GMAC2_PSC_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-
- mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-}
-
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
@@ -2757,12 +2729,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
if (phy_mode == PHY_INTERFACE_MODE_SGMII)
- mvneta_port_sgmii_config(pp);
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+ else
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
- mvneta_gmac_rgmii_set(pp, 1);
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+ val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
/* Cancel Port Reset */
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val &= ~MVNETA_GMAC2_PORT_RESET;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 61d7bcff4533..f0ae95f66ceb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2755,7 +2755,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
- int ret = __mlx4_init_one(pdev, 0);
+ const struct pci_device_id *id;
+ int ret;
+
+ id = pci_match_id(mlx4_pci_table, pdev);
+ ret = __mlx4_init_one(pdev, id->driver_data);
return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 5b63405f9d3c..0a1d76acab81 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4756,7 +4756,9 @@ static int qlge_probe(struct pci_dev *pdev,
ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
/* vlan gets same features (except vlan filter) */
- ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 1da36764b1a4..46a7790be004 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -180,7 +180,8 @@ static void ifb_setup(struct net_device *dev)
dev->tx_queue_len = TX_Q_LIMIT;
dev->features |= IFB_FEATURES;
- dev->vlan_features |= IFB_FEATURES;
+ dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index dd10d5817d2a..f9e96c427558 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -752,14 +752,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
DECLARE_WAITQUEUE(wait, current);
int temp;
/* ensure there are no more active urbs */
- add_wait_queue(&unlink_wakeup, &wait);
+ add_wait_queue(&dev->wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
- dev->wait = &unlink_wakeup;
temp = unlink_urbs(dev, &dev->txq) +
unlink_urbs(dev, &dev->rxq);
@@ -773,15 +771,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
"waited for %d urb completions\n", temp);
}
set_current_state(TASK_RUNNING);
- dev->wait = NULL;
- remove_wait_queue(&unlink_wakeup, &wait);
+ remove_wait_queue(&dev->wait, &wait);
}
int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
- int retval;
+ int retval, pm;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
@@ -791,6 +788,8 @@ int usbnet_stop (struct net_device *net)
net->stats.rx_packets, net->stats.tx_packets,
net->stats.rx_errors, net->stats.tx_errors);
+ /* to not race resume */
+ pm = usb_autopm_get_interface(dev->intf);
/* allow minidriver to stop correctly (wireless devices to turn off
* radio etc) */
if (info->stop) {
@@ -817,6 +816,9 @@ int usbnet_stop (struct net_device *net)
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
+ if (!pm)
+ usb_autopm_put_interface(dev->intf);
+
if (info->manage_power &&
!test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
info->manage_power(dev, 0);
@@ -1437,11 +1439,12 @@ static void usbnet_bh (unsigned long param)
/* restart RX again after disabling due to high error rate */
clear_bit(EVENT_RX_KILL, &dev->flags);
- // waiting for all pending urbs to complete?
- if (dev->wait) {
- if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
- wake_up (dev->wait);
- }
+ /* waiting for all pending urbs to complete?
+ * only then can we forgo submitting anew
+ */
+ if (waitqueue_active(&dev->wait)) {
+ if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
+ wake_up_all(&dev->wait);
// or are we maybe short a few urbs?
} else if (netif_running (dev->net) &&
@@ -1580,6 +1583,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->driver_name = name;
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ init_waitqueue_head(&dev->wait);
skb_queue_head_init (&dev->rxq);
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
@@ -1791,9 +1795,10 @@ int usbnet_resume (struct usb_interface *intf)
spin_unlock_irq(&dev->txq.lock);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
- /* handle remote wakeup ASAP */
- if (!dev->wait &&
- netif_device_present(dev->net) &&
+ /* handle remote wakeup ASAP
+ * we cannot race against stop
+ */
+ if (netif_device_present(dev->net) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_HALT, &dev->flags))
rx_alloc_submit(dev, GFP_NOIO);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e1c77d4b80e4..b4a10bcb66a0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -278,7 +278,10 @@ static void veth_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
dev->vlan_features = dev->features &
- ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 99fa48c941c6..6f15d9b315a1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -671,8 +671,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
if (err)
break;
} while (rq->vq->num_free);
- if (unlikely(!virtqueue_kick(rq->vq)))
- return false;
+ virtqueue_kick(rq->vq);
return !oom;
}
@@ -877,7 +876,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
err = xmit_skb(sq, skb);
/* This should not happen! */
- if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
+ if (unlikely(err)) {
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
@@ -886,6 +885,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+ virtqueue_kick(sq->vq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index a0fa5de210cf..e1e22e0f01e8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -505,9 +505,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
r = -ENOBUFS;
goto err;
}
- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
+ if (unlikely(r < 0))
+ goto err;
+
+ d = r;
if (d == vq->num) {
r = 0;
goto err;
@@ -532,6 +536,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
+
+ /* Detect overrun */
+ if (unlikely(datalen > 0)) {
+ r = UIO_MAXIOV + 1;
+ goto err;
+ }
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
@@ -587,6 +597,14 @@ static void handle_rx(struct vhost_net *net)
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
break;
+ /* On overrun, truncate and discard */
+ if (unlikely(headcount > UIO_MAXIOV)) {
+ msg.msg_iovlen = 1;
+ err = sock->ops->recvmsg(NULL, sock, &msg,
+ 1, MSG_DONTWAIT | MSG_TRUNC);
+ pr_debug("Discarded rx packet: len %zd\n", sock_len);
+ continue;
+ }
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 37d06ea624aa..61a6ac8fa8fc 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -399,11 +399,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
state = BP_EAGAIN;
break;
}
+ scrub_page(page);
- pfn = page_to_pfn(page);
- frame_list[i] = pfn_to_mfn(pfn);
+ frame_list[i] = page_to_pfn(page);
+ }
- scrub_page(page);
+ /*
+ * Ensure that ballooned highmem pages don't have kmaps.
+ *
+ * Do this before changing the p2m as kmap_flush_unused()
+ * reads PTEs to obtain pages (and hence needs the original
+ * p2m entry).
+ */
+ kmap_flush_unused();
+
+ /* Update direct mapping, invalidate P2M, and add to balloon. */
+ for (i = 0; i < nr_pages; i++) {
+ pfn = frame_list[i];
+ frame_list[i] = pfn_to_mfn(pfn);
+ page = pfn_to_page(pfn);
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
@@ -429,11 +443,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
#endif
- balloon_append(pfn_to_page(pfn));
+ balloon_append(page);
}
- /* Ensure that ballooned highmem pages don't have kmaps. */
- kmap_flush_unused();
flush_tlb_all();
set_xen_guest_handle(reservation.extent_start, frame_list);
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 24084732b1d0..80ef38c73e5a 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -41,19 +41,8 @@ static const struct dentry_operations anon_inodefs_dentry_operations = {
static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- struct dentry *root;
- root = mount_pseudo(fs_type, "anon_inode:", NULL,
+ return mount_pseudo(fs_type, "anon_inode:", NULL,
&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
- if (!IS_ERR(root)) {
- struct super_block *s = root->d_sb;
- anon_inode_inode = alloc_anon_inode(s);
- if (IS_ERR(anon_inode_inode)) {
- dput(root);
- deactivate_locked_super(s);
- root = ERR_CAST(anon_inode_inode);
- }
- }
- return root;
}
static struct file_system_type anon_inode_fs_type = {
@@ -175,22 +164,15 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
static int __init anon_inode_init(void)
{
- int error;
-
- error = register_filesystem(&anon_inode_fs_type);
- if (error)
- goto err_exit;
anon_inode_mnt = kern_mount(&anon_inode_fs_type);
- if (IS_ERR(anon_inode_mnt)) {
- error = PTR_ERR(anon_inode_mnt);
- goto err_unregister_filesystem;
- }
- return 0;
+ if (IS_ERR(anon_inode_mnt))
+ panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt));
-err_unregister_filesystem:
- unregister_filesystem(&anon_inode_fs_type);
-err_exit:
- panic(KERN_ERR "anon_inode_init() failed (%d)\n", error);
+ anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
+ if (IS_ERR(anon_inode_inode))
+ panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
+
+ return 0;
}
fs_initcall(anon_inode_init);
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 1324e6600e57..ca5ce14cbddc 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -346,7 +346,9 @@ int ocfs2_cluster_connect(const char *stack_name,
strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
new_conn->cc_namelen = grouplen;
- strlcpy(new_conn->cc_cluster_name, cluster_name, CLUSTER_NAME_MAX + 1);
+ if (cluster_name_len)
+ strlcpy(new_conn->cc_cluster_name, cluster_name,
+ CLUSTER_NAME_MAX + 1);
new_conn->cc_cluster_name_len = cluster_name_len;
new_conn->cc_recovery_handler = recovery_handler;
new_conn->cc_recovery_data = recovery_data;
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 1005ebf17575..5a09a48f2658 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -163,4 +163,11 @@ enum {
/* changeable features with no special hardware requirements */
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
#endif /* _LINUX_NETDEV_FEATURES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4cd5e9e13c87..06287c110241 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3097,7 +3097,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
return __skb_gso_segment(skb, features, true);
}
-__be16 skb_network_protocol(struct sk_buff *skb);
+__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
static inline bool can_checksum_protocol(netdev_features_t features,
__be16 protocol)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index aa2c22cb8158..18ef0224fb6a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2508,8 +2508,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
-void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
- int len, int hlen);
+int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
+ int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index e303eef94dd5..0662e98fef72 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -30,7 +30,7 @@ struct usbnet {
struct driver_info *driver_info;
const char *driver_name;
void *driver_priv;
- wait_queue_head_t *wait;
+ wait_queue_head_t wait;
struct mutex phy_mutex;
unsigned char suspend_count;
unsigned char pkt_cnt, pkt_err;
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 9650a3ffd2d2..b4956a5fcc3f 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -31,8 +31,10 @@
#define IF_PREFIX_AUTOCONF 0x02
enum {
+ INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,
INET6_IFADDR_STATE_POSTDAD,
+ INET6_IFADDR_STATE_ERRDAD,
INET6_IFADDR_STATE_UP,
INET6_IFADDR_STATE_DEAD,
};
@@ -58,7 +60,7 @@ struct inet6_ifaddr {
unsigned long cstamp; /* created timestamp */
unsigned long tstamp; /* updated timestamp */
- struct timer_list dad_timer;
+ struct delayed_work dad_work;
struct inet6_dev *idev;
struct rt6_info *rt;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 815c878f409b..24c1f2382557 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1600,15 +1600,31 @@ void trace_buffer_unlock_commit(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
+static struct ring_buffer *temp_buffer;
+
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
struct ftrace_event_file *ftrace_file,
int type, unsigned long len,
unsigned long flags, int pc)
{
+ struct ring_buffer_event *entry;
+
*current_rb = ftrace_file->tr->trace_buffer.buffer;
- return trace_buffer_lock_reserve(*current_rb,
+ entry = trace_buffer_lock_reserve(*current_rb,
type, len, flags, pc);
+ /*
+ * If tracing is off, but we have triggers enabled
+ * we still need to look at the event data. Use the temp_buffer
+ * to store the trace event for the tigger to use. It's recusive
+ * safe and will not be recorded anywhere.
+ */
+ if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
+ *current_rb = temp_buffer;
+ entry = trace_buffer_lock_reserve(*current_rb,
+ type, len, flags, pc);
+ }
+ return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
@@ -6494,11 +6510,16 @@ __init static int tracer_alloc_buffers(void)
raw_spin_lock_init(&global_trace.start_lock);
+ /* Used for event triggers */
+ temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
+ if (!temp_buffer)
+ goto out_free_cpumask;
+
/* TODO: make the number of buffers hot pluggable with CPUS */
if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
WARN_ON(1);
- goto out_free_cpumask;
+ goto out_free_temp_buffer;
}
if (global_trace.buffer_disabled)
@@ -6540,6 +6561,8 @@ __init static int tracer_alloc_buffers(void)
return 0;
+out_free_temp_buffer:
+ ring_buffer_free(temp_buffer);
out_free_cpumask:
free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
diff --git a/lib/random32.c b/lib/random32.c
index 1e5b2df44291..614896778700 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -244,8 +244,19 @@ static void __prandom_reseed(bool late)
static bool latch = false;
static DEFINE_SPINLOCK(lock);
+ /* Asking for random bytes might result in bytes getting
+ * moved into the nonblocking pool and thus marking it
+ * as initialized. In this case we would double back into
+ * this function and attempt to do a late reseed.
+ * Ignore the pointless attempt to reseed again if we're
+ * already waiting for bytes when the nonblocking pool
+ * got initialized.
+ */
+
/* only allow initial seeding (late == false) once */
- spin_lock_irqsave(&lock, flags);
+ if (!spin_trylock_irqsave(&lock, flags))
+ return;
+
if (latch && !late)
goto out;
latch = true;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index ec9909935fb6..175273f38cb1 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev,
static void vlan_transfer_features(struct net_device *dev,
struct net_device *vlandev)
{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
+
vlandev->gso_max_size = dev->gso_max_size;
- if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
vlandev->hard_header_len = dev->hard_header_len;
else
vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index a78bebeca4d9..6f142f03716d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -578,6 +578,9 @@ static int vlan_dev_init(struct net_device *dev)
dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
dev->gso_max_size = real_dev->gso_max_size;
+ if (dev->features & NETIF_F_VLAN_FEATURES)
+ netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
+
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
@@ -592,7 +595,8 @@ static int vlan_dev_init(struct net_device *dev)
#endif
dev->needed_headroom = real_dev->needed_headroom;
- if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
+ if (vlan_hw_offload_capable(real_dev->features,
+ vlan_dev_priv(dev)->vlan_proto)) {
dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 0dd01a05bd59..3e2da2cb72db 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -49,14 +49,14 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
brstats->tx_bytes += skb->len;
u64_stats_update_end(&brstats->syncp);
- if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
- goto out;
-
BR_INPUT_SKB_CB(skb)->brdev = dev;
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
+ if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
+ goto out;
+
if (is_broadcast_ether_addr(dest))
br_flood_deliver(br, skb, false);
else if (is_multicast_ether_addr(dest)) {
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 28d544627422..d0cca3c65f01 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -29,6 +29,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
+ struct net_port_vlans *pv;
u64_stats_update_begin(&brstats->syncp);
brstats->rx_packets++;
@@ -39,18 +40,18 @@ static int br_pass_frame_up(struct sk_buff *skb)
* packet is allowed except in promisc modue when someone
* may be running packet capture.
*/
+ pv = br_get_vlan_info(br);
if (!(brdev->flags & IFF_PROMISC) &&
- !br_allowed_egress(br, br_get_vlan_info(br), skb)) {
+ !br_allowed_egress(br, pv, skb)) {
kfree_skb(skb);
return NET_RX_DROP;
}
- skb = br_handle_vlan(br, br_get_vlan_info(br), skb);
- if (!skb)
- return NET_RX_DROP;
-
indev = skb->dev;
skb->dev = brdev;
+ skb = br_handle_vlan(br, pv, skb);
+ if (!skb)
+ return NET_RX_DROP;
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
netif_receive_skb);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 5d5b101be102..91510712c7a7 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -119,22 +119,6 @@ static void __vlan_flush(struct net_port_vlans *v)
kfree_rcu(v, rcu);
}
-/* Strip the tag from the packet. Will return skb with tci set 0. */
-static struct sk_buff *br_vlan_untag(struct sk_buff *skb)
-{
- if (skb->protocol != htons(ETH_P_8021Q)) {
- skb->vlan_tci = 0;
- return skb;
- }
-
- skb->vlan_tci = 0;
- skb = vlan_untag(skb);
- if (skb)
- skb->vlan_tci = 0;
-
- return skb;
-}
-
struct sk_buff *br_handle_vlan(struct net_bridge *br,
const struct net_port_vlans *pv,
struct sk_buff *skb)
@@ -144,13 +128,27 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
if (!br->vlan_enabled)
goto out;
+ /* Vlan filter table must be configured at this point. The
+ * only exception is the bridge is set in promisc mode and the
+ * packet is destined for the bridge device. In this case
+ * pass the packet as is.
+ */
+ if (!pv) {
+ if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
+ goto out;
+ } else {
+ kfree_skb(skb);
+ return NULL;
+ }
+ }
+
/* At this point, we know that the frame was filtered and contains
* a valid vlan id. If the vlan id is set in the untagged bitmap,
* send untagged; otherwise, send tagged.
*/
br_vlan_get_tag(skb, &vid);
if (test_bit(vid, pv->untagged_bitmap))
- skb = br_vlan_untag(skb);
+ skb->vlan_tci = 0;
out:
return skb;
@@ -174,6 +172,18 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
if (!v)
return false;
+ /* If vlan tx offload is disabled on bridge device and frame was
+ * sent from vlan device on the bridge device, it does not have
+ * HW accelerated vlan tag.
+ */
+ if (unlikely(!vlan_tx_tag_present(skb) &&
+ (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD)))) {
+ skb = vlan_untag(skb);
+ if (unlikely(!skb))
+ return false;
+ }
+
err = br_vlan_get_tag(skb, vid);
if (!*vid) {
u16 pvid = br_get_pvid(v);
diff --git a/net/core/dev.c b/net/core/dev.c
index 778b2036a9e7..cf92139b229c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2281,7 +2281,7 @@ out:
}
EXPORT_SYMBOL(skb_checksum_help);
-__be16 skb_network_protocol(struct sk_buff *skb)
+__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
__be16 type = skb->protocol;
int vlan_depth = ETH_HLEN;
@@ -2308,6 +2308,8 @@ __be16 skb_network_protocol(struct sk_buff *skb)
vlan_depth += VLAN_HLEN;
}
+ *depth = vlan_depth;
+
return type;
}
@@ -2321,12 +2323,13 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
{
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_offload *ptype;
- __be16 type = skb_network_protocol(skb);
+ int vlan_depth = skb->mac_len;
+ __be16 type = skb_network_protocol(skb, &vlan_depth);
if (unlikely(!type))
return ERR_PTR(-EINVAL);
- __skb_pull(skb, skb->mac_len);
+ __skb_pull(skb, vlan_depth);
rcu_read_lock();
list_for_each_entry_rcu(ptype, &offload_base, list) {
@@ -2493,8 +2496,10 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
const struct net_device *dev,
netdev_features_t features)
{
+ int tmp;
+
if (skb->ip_summed != CHECKSUM_NONE &&
- !can_checksum_protocol(features, skb_network_protocol(skb))) {
+ !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
features &= ~NETIF_F_ALL_CSUM;
} else if (illegal_highdma(dev, skb)) {
features &= ~NETIF_F_SG;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3f14c638c2b1..30c7d35dd862 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2127,25 +2127,31 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
*
* The `hlen` as calculated by skb_zerocopy_headlen() specifies the
* headroom in the `to` buffer.
+ *
+ * Return value:
+ * 0: everything is OK
+ * -ENOMEM: couldn't orphan frags of @from due to lack of memory
+ * -EFAULT: skb_copy_bits() found some problem with skb geometry
*/
-void
-skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+int
+skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
int i, j = 0;
int plen = 0; /* length of skb->head fragment */
+ int ret;
struct page *page;
unsigned int offset;
BUG_ON(!from->head_frag && !hlen);
/* dont bother with small payloads */
- if (len <= skb_tailroom(to)) {
- skb_copy_bits(from, 0, skb_put(to, len), len);
- return;
- }
+ if (len <= skb_tailroom(to))
+ return skb_copy_bits(from, 0, skb_put(to, len), len);
if (hlen) {
- skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+ ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+ if (unlikely(ret))
+ return ret;
len -= hlen;
} else {
plen = min_t(int, skb_headlen(from), len);
@@ -2163,6 +2169,11 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
to->len += len + plen;
to->data_len += len + plen;
+ if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
+ skb_tx_error(from);
+ return -ENOMEM;
+ }
+
for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
if (!len)
break;
@@ -2173,6 +2184,8 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
j++;
}
skb_shinfo(to)->nr_frags = j;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(skb_zerocopy);
@@ -2866,8 +2879,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
int err = -ENOMEM;
int i = 0;
int pos;
+ int dummy;
- proto = skb_network_protocol(head_skb);
+ proto = skb_network_protocol(head_skb, &dummy);
if (unlikely(!proto))
return ERR_PTR(-EINVAL);
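As a usage note on the skb_zerocopy() return-type change above: callers are now expected to check the result and treat the source skb as errored on failure, as the nfnetlink_queue hunk further below does. The fragment is a hedged sketch with illustrative variable names (nskb, entskb), not part of the patch.

	int err;

	/* skb_zerocopy() can now fail (-ENOMEM or -EFAULT); unwind on error. */
	err = skb_zerocopy(nskb, entskb, data_len, hlen);
	if (err) {
		skb_tx_error(entskb);	/* flag the source skb as errored */
		kfree_skb(nskb);
		return err;
	}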
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 1863422fb7d5..250be7421ab3 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -182,6 +182,14 @@ static int gre_cisco_rcv(struct sk_buff *skb)
int i;
bool csum_err = false;
+#ifdef CONFIG_NET_IPGRE_BROADCAST
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+ /* Looped back packet, drop it! */
+ if (rt_is_output_route(skb_rtable(skb)))
+ goto drop;
+ }
+#endif
+
if (parse_gre_header(skb, &tpi, &csum_err) < 0)
goto drop;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 66aaf506fbef..e77381d1df9a 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -420,9 +420,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
- /* Looped back packet, drop it! */
- if (rt_is_output_route(skb_rtable(skb)))
- goto drop;
tunnel->dev->stats.multicast++;
skb->pkt_type = PACKET_BROADCAST;
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index b86f0a37fa7c..e0c2b1d2ea4e 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -108,6 +108,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
nf_reset(skb);
secpath_reset(skb);
skb_clear_hash_if_not_l4(skb);
+ skb_dst_drop(skb);
skb->vlan_tci = 0;
skb_set_queue_mapping(skb, 0);
skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4555244607d0..6379894ec210 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2628,7 +2628,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
{
__be32 dest, src;
__u16 destp, srcp;
- long delta = tw->tw_ttd - jiffies;
+ s32 delta = tw->tw_ttd - inet_tw_time_stamp();
dest = tw->tw_daddr;
src = tw->tw_rcv_saddr;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 344e972426df..6c7fa0853fc7 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -133,10 +133,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev);
static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
static DEFINE_SPINLOCK(addrconf_hash_lock);
-static void addrconf_verify(unsigned long);
+static void addrconf_verify(void);
+static void addrconf_verify_rtnl(void);
+static void addrconf_verify_work(struct work_struct *);
-static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0);
-static DEFINE_SPINLOCK(addrconf_verify_lock);
+static struct workqueue_struct *addrconf_wq;
+static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
@@ -151,7 +153,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
u32 flags, u32 noflags);
static void addrconf_dad_start(struct inet6_ifaddr *ifp);
-static void addrconf_dad_timer(unsigned long data);
+static void addrconf_dad_work(struct work_struct *w);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
static void addrconf_dad_run(struct inet6_dev *idev);
static void addrconf_rs_timer(unsigned long data);
@@ -247,9 +249,9 @@ static void addrconf_del_rs_timer(struct inet6_dev *idev)
__in6_dev_put(idev);
}
-static void addrconf_del_dad_timer(struct inet6_ifaddr *ifp)
+static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
{
- if (del_timer(&ifp->dad_timer))
+ if (cancel_delayed_work(&ifp->dad_work))
__in6_ifa_put(ifp);
}
@@ -261,12 +263,12 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
mod_timer(&idev->rs_timer, jiffies + when);
}
-static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp,
- unsigned long when)
+static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
+ unsigned long delay)
{
- if (!timer_pending(&ifp->dad_timer))
+ if (!delayed_work_pending(&ifp->dad_work))
in6_ifa_hold(ifp);
- mod_timer(&ifp->dad_timer, jiffies + when);
+ mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
}
static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -751,8 +753,9 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
in6_dev_put(ifp->idev);
- if (del_timer(&ifp->dad_timer))
- pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
+ if (cancel_delayed_work(&ifp->dad_work))
+ pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
+ ifp);
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
pr_warn("Freeing alive inet6 address %p\n", ifp);
@@ -849,8 +852,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
spin_lock_init(&ifa->lock);
spin_lock_init(&ifa->state_lock);
- setup_timer(&ifa->dad_timer, addrconf_dad_timer,
- (unsigned long)ifa);
+ INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
INIT_HLIST_NODE(&ifa->addr_lst);
ifa->scope = scope;
ifa->prefix_len = pfxlen;
@@ -990,6 +992,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
unsigned long expires;
+ ASSERT_RTNL();
+
spin_lock_bh(&ifp->state_lock);
state = ifp->state;
ifp->state = INET6_IFADDR_STATE_DEAD;
@@ -1021,7 +1025,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
write_unlock_bh(&ifp->idev->lock);
- addrconf_del_dad_timer(ifp);
+ addrconf_del_dad_work(ifp);
ipv6_ifa_notify(RTM_DELADDR, ifp);
@@ -1604,7 +1608,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
{
if (ifp->flags&IFA_F_PERMANENT) {
spin_lock_bh(&ifp->lock);
- addrconf_del_dad_timer(ifp);
+ addrconf_del_dad_work(ifp);
ifp->flags |= IFA_F_TENTATIVE;
if (dad_failed)
ifp->flags |= IFA_F_DADFAILED;
@@ -1625,20 +1629,21 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
spin_unlock_bh(&ifp->lock);
}
ipv6_del_addr(ifp);
- } else
+ } else {
ipv6_del_addr(ifp);
+ }
}
static int addrconf_dad_end(struct inet6_ifaddr *ifp)
{
int err = -ENOENT;
- spin_lock(&ifp->state_lock);
+ spin_lock_bh(&ifp->state_lock);
if (ifp->state == INET6_IFADDR_STATE_DAD) {
ifp->state = INET6_IFADDR_STATE_POSTDAD;
err = 0;
}
- spin_unlock(&ifp->state_lock);
+ spin_unlock_bh(&ifp->state_lock);
return err;
}
@@ -1671,7 +1676,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
}
}
- addrconf_dad_stop(ifp, 1);
+ spin_lock_bh(&ifp->state_lock);
+ /* transition from _POSTDAD to _ERRDAD */
+ ifp->state = INET6_IFADDR_STATE_ERRDAD;
+ spin_unlock_bh(&ifp->state_lock);
+
+ addrconf_mod_dad_work(ifp, 0);
}
/* Join to solicited addr multicast group. */
@@ -1680,6 +1690,8 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
{
struct in6_addr maddr;
+ ASSERT_RTNL();
+
if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
@@ -1691,6 +1703,8 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct in6_addr maddr;
+ ASSERT_RTNL();
+
if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
@@ -1701,6 +1715,9 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
+
+ ASSERT_RTNL();
+
if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -1712,6 +1729,9 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
+
+ ASSERT_RTNL();
+
if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -2271,11 +2291,13 @@ ok:
return;
}
- ifp->flags |= IFA_F_MANAGETEMPADDR;
update_lft = 0;
create = 1;
+ spin_lock_bh(&ifp->lock);
+ ifp->flags |= IFA_F_MANAGETEMPADDR;
ifp->cstamp = jiffies;
ifp->tokenized = tokenized;
+ spin_unlock_bh(&ifp->lock);
addrconf_dad_start(ifp);
}
@@ -2326,7 +2348,7 @@ ok:
create, now);
in6_ifa_put(ifp);
- addrconf_verify(0);
+ addrconf_verify();
}
}
inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
@@ -2475,7 +2497,7 @@ static int inet6_addr_add(struct net *net, int ifindex,
manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
true, jiffies);
in6_ifa_put(ifp);
- addrconf_verify(0);
+ addrconf_verify_rtnl();
return 0;
}
@@ -3011,7 +3033,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
hlist_for_each_entry_rcu(ifa, h, addr_lst) {
if (ifa->idev == idev) {
hlist_del_init_rcu(&ifa->addr_lst);
- addrconf_del_dad_timer(ifa);
+ addrconf_del_dad_work(ifa);
goto restart;
}
}
@@ -3049,7 +3071,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
while (!list_empty(&idev->addr_list)) {
ifa = list_first_entry(&idev->addr_list,
struct inet6_ifaddr, if_list);
- addrconf_del_dad_timer(ifa);
+ addrconf_del_dad_work(ifa);
list_del(&ifa->if_list);
@@ -3148,10 +3170,10 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
ifp->dad_probes = idev->cnf.dad_transmits;
- addrconf_mod_dad_timer(ifp, rand_num);
+ addrconf_mod_dad_work(ifp, rand_num);
}
-static void addrconf_dad_start(struct inet6_ifaddr *ifp)
+static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
struct net_device *dev = idev->dev;
@@ -3203,25 +3225,68 @@ out:
read_unlock_bh(&idev->lock);
}
-static void addrconf_dad_timer(unsigned long data)
+static void addrconf_dad_start(struct inet6_ifaddr *ifp)
{
- struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
+ bool begin_dad = false;
+
+ spin_lock_bh(&ifp->state_lock);
+ if (ifp->state != INET6_IFADDR_STATE_DEAD) {
+ ifp->state = INET6_IFADDR_STATE_PREDAD;
+ begin_dad = true;
+ }
+ spin_unlock_bh(&ifp->state_lock);
+
+ if (begin_dad)
+ addrconf_mod_dad_work(ifp, 0);
+}
+
+static void addrconf_dad_work(struct work_struct *w)
+{
+ struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
+ struct inet6_ifaddr,
+ dad_work);
struct inet6_dev *idev = ifp->idev;
struct in6_addr mcaddr;
+ enum {
+ DAD_PROCESS,
+ DAD_BEGIN,
+ DAD_ABORT,
+ } action = DAD_PROCESS;
+
+ rtnl_lock();
+
+ spin_lock_bh(&ifp->state_lock);
+ if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
+ action = DAD_BEGIN;
+ ifp->state = INET6_IFADDR_STATE_DAD;
+ } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
+ action = DAD_ABORT;
+ ifp->state = INET6_IFADDR_STATE_POSTDAD;
+ }
+ spin_unlock_bh(&ifp->state_lock);
+
+ if (action == DAD_BEGIN) {
+ addrconf_dad_begin(ifp);
+ goto out;
+ } else if (action == DAD_ABORT) {
+ addrconf_dad_stop(ifp, 1);
+ goto out;
+ }
+
if (!ifp->dad_probes && addrconf_dad_end(ifp))
goto out;
- write_lock(&idev->lock);
+ write_lock_bh(&idev->lock);
if (idev->dead || !(idev->if_flags & IF_READY)) {
- write_unlock(&idev->lock);
+ write_unlock_bh(&idev->lock);
goto out;
}
spin_lock(&ifp->lock);
if (ifp->state == INET6_IFADDR_STATE_DEAD) {
spin_unlock(&ifp->lock);
- write_unlock(&idev->lock);
+ write_unlock_bh(&idev->lock);
goto out;
}
@@ -3232,7 +3297,7 @@ static void addrconf_dad_timer(unsigned long data)
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
- write_unlock(&idev->lock);
+ write_unlock_bh(&idev->lock);
addrconf_dad_completed(ifp);
@@ -3240,16 +3305,17 @@ static void addrconf_dad_timer(unsigned long data)
}
ifp->dad_probes--;
- addrconf_mod_dad_timer(ifp,
- NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
+ addrconf_mod_dad_work(ifp,
+ NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
spin_unlock(&ifp->lock);
- write_unlock(&idev->lock);
+ write_unlock_bh(&idev->lock);
/* send a neighbour solicitation for our addr */
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
out:
in6_ifa_put(ifp);
+ rtnl_unlock();
}
/* ifp->idev must be at least read locked */
@@ -3276,7 +3342,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
struct in6_addr lladdr;
bool send_rs, send_mld;
- addrconf_del_dad_timer(ifp);
+ addrconf_del_dad_work(ifp);
/*
* Configure the address for reception. Now it is valid.
@@ -3517,23 +3583,23 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
* Periodic address status verification
*/
-static void addrconf_verify(unsigned long foo)
+static void addrconf_verify_rtnl(void)
{
unsigned long now, next, next_sec, next_sched;
struct inet6_ifaddr *ifp;
int i;
+ ASSERT_RTNL();
+
rcu_read_lock_bh();
- spin_lock(&addrconf_verify_lock);
now = jiffies;
next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
- del_timer(&addr_chk_timer);
+ cancel_delayed_work(&addr_chk_work);
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
restart:
- hlist_for_each_entry_rcu_bh(ifp,
- &inet6_addr_lst[i], addr_lst) {
+ hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
unsigned long age;
/* When setting preferred_lft to a value not zero or
@@ -3628,13 +3694,22 @@ restart:
ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
now, next, next_sec, next_sched);
-
- addr_chk_timer.expires = next_sched;
- add_timer(&addr_chk_timer);
- spin_unlock(&addrconf_verify_lock);
+ mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
rcu_read_unlock_bh();
}
+static void addrconf_verify_work(struct work_struct *w)
+{
+ rtnl_lock();
+ addrconf_verify_rtnl();
+ rtnl_unlock();
+}
+
+static void addrconf_verify(void)
+{
+ mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
+}
+
static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
struct in6_addr **peer_pfx)
{
@@ -3691,6 +3766,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
bool was_managetempaddr;
bool had_prefixroute;
+ ASSERT_RTNL();
+
if (!valid_lft || (prefered_lft > valid_lft))
return -EINVAL;
@@ -3756,7 +3833,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
!was_managetempaddr, jiffies);
}
- addrconf_verify(0);
+ addrconf_verify_rtnl();
return 0;
}
@@ -4386,6 +4463,8 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
bool update_rs = false;
struct in6_addr ll_addr;
+ ASSERT_RTNL();
+
if (token == NULL)
return -EINVAL;
if (ipv6_addr_any(token))
@@ -4434,7 +4513,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
}
write_unlock_bh(&idev->lock);
- addrconf_verify(0);
+ addrconf_verify_rtnl();
return 0;
}
@@ -4636,6 +4715,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
{
struct net *net = dev_net(ifp->idev->dev);
+ if (event)
+ ASSERT_RTNL();
+
inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
switch (event) {
@@ -5244,6 +5326,12 @@ int __init addrconf_init(void)
if (err < 0)
goto out_addrlabel;
+ addrconf_wq = create_workqueue("ipv6_addrconf");
+ if (!addrconf_wq) {
+ err = -ENOMEM;
+ goto out_nowq;
+ }
+
/* The addrconf netdev notifier requires that loopback_dev
* has it's ipv6 private information allocated and setup
* before it can bring up and give link-local addresses
@@ -5274,7 +5362,7 @@ int __init addrconf_init(void)
register_netdevice_notifier(&ipv6_dev_notf);
- addrconf_verify(0);
+ addrconf_verify();
rtnl_af_register(&inet6_ops);
@@ -5302,6 +5390,8 @@ errout:
rtnl_af_unregister(&inet6_ops);
unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
+ destroy_workqueue(addrconf_wq);
+out_nowq:
unregister_pernet_subsys(&addrconf_ops);
out_addrlabel:
ipv6_addr_label_cleanup();
@@ -5337,7 +5427,8 @@ void addrconf_cleanup(void)
for (i = 0; i < IN6_ADDR_HSIZE; i++)
WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
spin_unlock_bh(&addrconf_hash_lock);
-
- del_timer(&addr_chk_timer);
+ cancel_delayed_work(&addr_chk_work);
rtnl_unlock();
+
+ destroy_workqueue(addrconf_wq);
}
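
The net/ipv6/addrconf.c hunks above convert duplicate address detection (DAD) and the periodic address verification from timers to delayed work items on a dedicated addrconf_wq workqueue, so the handlers run in process context and can take rtnl_lock(), which may sleep and was therefore off limits in the old timer callbacks. The work handler also dispatches on a small state machine (PREDAD begins DAD, ERRDAD aborts it), so contexts that cannot sleep only flag the desired transition and schedule the work. Below is a minimal sketch of the timer-to-delayed-work pattern; my_wq, my_obj and my_dad_work are illustrative names, not code from the patch.

/* Sketch only: delayed_work instead of timer_list, so the handler may
 * sleep and take rtnl_lock().  All my_* names are hypothetical.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

struct my_obj {
	struct delayed_work dad_work;	/* was: struct timer_list dad_timer */
	int probes_left;
};

static void my_dad_work(struct work_struct *w)
{
	struct my_obj *obj = container_of(to_delayed_work(w),
					  struct my_obj, dad_work);

	rtnl_lock();			/* not possible in a timer callback */
	if (obj->probes_left-- > 0)
		queue_delayed_work(my_wq, &obj->dad_work, HZ);	/* re-arm */
	rtnl_unlock();
}

static int my_init(struct my_obj *obj)
{
	my_wq = create_workqueue("my_addrconf");
	if (!my_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&obj->dad_work, my_dad_work);
	obj->probes_left = 3;
	queue_delayed_work(my_wq, &obj->dad_work, 0);
	return 0;
}

static void my_cleanup(struct my_obj *obj)
{
	cancel_delayed_work_sync(&obj->dad_work);	/* was: del_timer() */
	destroy_workqueue(my_wq);
}
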
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index f072fe803510..108120f216b1 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -354,13 +354,16 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
GFP_ATOMIC);
- if (!skb)
+ if (!skb) {
+ skb_tx_error(entskb);
return NULL;
+ }
nlh = nlmsg_put(skb, 0, 0,
NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
sizeof(struct nfgenmsg), 0);
if (!nlh) {
+ skb_tx_error(entskb);
kfree_skb(skb);
return NULL;
}
@@ -488,13 +491,15 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
nla->nla_type = NFQA_PAYLOAD;
nla->nla_len = nla_attr_size(data_len);
- skb_zerocopy(skb, entskb, data_len, hlen);
+ if (skb_zerocopy(skb, entskb, data_len, hlen))
+ goto nla_put_failure;
}
nlh->nlmsg_len = skb->len;
return skb;
nla_put_failure:
+ skb_tx_error(entskb);
kfree_skb(skb);
net_err_ratelimited("nf_queue: error creating packet message\n");
return NULL;
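
The nfnetlink_queue change above makes nfqnl_build_packet_message() treat skb_zerocopy() as fallible and call skb_tx_error() on every failure path, so a zerocopy source skb (for example one originating from vhost-net) has its completion callback informed of the error instead of the data being dropped silently. A sketch of that pattern, assuming the int-returning skb_zerocopy() introduced by this series and using a hypothetical build_msg() helper:

/* Sketch only: propagate skb_zerocopy() failure and report it to the
 * source skb via skb_tx_error().  build_msg() is a hypothetical helper.
 */
#include <linux/skbuff.h>
#include <net/netlink.h>

static struct sk_buff *build_msg(struct sk_buff *src, int data_len, int hlen)
{
	struct sk_buff *skb;

	skb = nlmsg_new(nla_total_size(data_len), GFP_ATOMIC);
	if (!skb)
		goto err;

	if (skb_zerocopy(skb, src, data_len, hlen))	/* now returns int */
		goto err_free;

	return skb;

err_free:
	kfree_skb(skb);
err:
	skb_tx_error(src);	/* notify the zerocopy callback, if any */
	return NULL;
}
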
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 48bbcd94631e..a3276e3c4feb 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -464,7 +464,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
}
nla->nla_len = nla_attr_size(skb->len);
- skb_zerocopy(user_skb, skb, skb->len, hlen);
+ err = skb_zerocopy(user_skb, skb, skb->len, hlen);
+ if (err)
+ goto out;
/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
@@ -478,6 +480,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
out:
+ if (err)
+ skb_tx_error(skb);
kfree_skb(nskb);
return err;
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index dda451f4429c..2998989e76db 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -103,30 +103,24 @@ static void stats_read(struct flow_stats *stats,
void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
unsigned long *used, __be16 *tcp_flags)
{
- int cpu, cur_cpu;
+ int cpu;
*used = 0;
*tcp_flags = 0;
memset(ovs_stats, 0, sizeof(*ovs_stats));
+ local_bh_disable();
if (!flow->stats.is_percpu) {
stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
} else {
- cur_cpu = get_cpu();
for_each_possible_cpu(cpu) {
struct flow_stats *stats;
- if (cpu == cur_cpu)
- local_bh_disable();
-
stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
stats_read(stats, ovs_stats, used, tcp_flags);
-
- if (cpu == cur_cpu)
- local_bh_enable();
}
- put_cpu();
}
+ local_bh_enable();
}
static void stats_reset(struct flow_stats *stats)
@@ -141,25 +135,17 @@ static void stats_reset(struct flow_stats *stats)
void ovs_flow_stats_clear(struct sw_flow *flow)
{
- int cpu, cur_cpu;
+ int cpu;
+ local_bh_disable();
if (!flow->stats.is_percpu) {
stats_reset(flow->stats.stat);
} else {
- cur_cpu = get_cpu();
-
for_each_possible_cpu(cpu) {
-
- if (cpu == cur_cpu)
- local_bh_disable();
-
stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
-
- if (cpu == cur_cpu)
- local_bh_enable();
}
- put_cpu();
}
+ local_bh_enable();
}
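
The openvswitch/flow.c hunks replace the per-iteration get_cpu()/local_bh_disable() dance with a single local_bh_disable()/local_bh_enable() pair around the whole walk over the per-CPU statistics; bottom halves must be off because the local CPU's copy is also updated (under the same per-stats spinlock) from BH context on the receive path. A minimal sketch of that scoping, with illustrative my_* names and the synchronization reduced to one spinlock per per-CPU entry:

/* Sketch only: disable BHs once around the whole per-CPU walk instead
 * of toggling them for the local CPU inside the loop.  my_* names are
 * hypothetical.
 */
#include <linux/bottom_half.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_counters {
	spinlock_t lock;	/* also taken from BH context on the hot path */
	u64 packets;
	u64 bytes;
};

struct my_totals {
	u64 packets;
	u64 bytes;
};

static void my_stats_sum(struct my_counters __percpu *pcpu,
			 struct my_totals *total)
{
	int cpu;

	memset(total, 0, sizeof(*total));

	local_bh_disable();	/* one pair for the whole loop */
	for_each_possible_cpu(cpu) {
		struct my_counters *c = per_cpu_ptr(pcpu, cpu);

		spin_lock(&c->lock);	/* safe: BHs are already disabled */
		total->packets += c->packets;
		total->bytes   += c->bytes;
		spin_unlock(&c->lock);
	}
	local_bh_enable();
}
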
static int check_header(struct sk_buff *skb, int len)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ce6ec6c2f4de..94404f19f9de 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1787,8 +1787,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
err = mutex_lock_interruptible(&u->readlock);
- if (err) {
- err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+ if (unlikely(err)) {
+ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
+ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+ */
+ err = noblock ? -EAGAIN : -ERESTARTSYS;
goto out;
}
@@ -1913,6 +1916,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct unix_sock *u = unix_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
int copied = 0;
+ int noblock = flags & MSG_DONTWAIT;
int check_creds = 0;
int target;
int err = 0;
@@ -1928,7 +1932,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
- timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+ timeo = sock_rcvtimeo(sk, noblock);
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
@@ -1940,8 +1944,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
}
err = mutex_lock_interruptible(&u->readlock);
- if (err) {
- err = sock_intr_errno(timeo);
+ if (unlikely(err)) {
+ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
+ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+ */
+ err = noblock ? -EAGAIN : -ERESTARTSYS;
goto out;
}
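
The af_unix hunks change unix_dgram_recvmsg() and unix_stream_recvmsg() so that a failed mutex_lock_interruptible() on u->readlock yields -EAGAIN for a non-blocking call and -ERESTARTSYS otherwise, instead of sock_intr_errno(timeo), because the mutex does not honour the socket receive timeout. A sketch of that errno mapping, with a hypothetical my_recv_lock() helper:

/* Sketch only: map a failed interruptible mutex acquisition to the
 * errno a recvmsg() caller expects.  my_recv_lock() is hypothetical.
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static int my_recv_lock(struct mutex *readlock, int noblock)
{
	int err;

	err = mutex_lock_interruptible(readlock);
	if (unlikely(err)) {
		/* mutex_lock_interruptible() fails only on a signal and does
		 * not honour sk_rcvtimeo, so choose the errno here: EAGAIN
		 * for non-blocking callers, ERESTARTSYS so blocking callers
		 * have the syscall restarted after the signal handler.
		 */
		return noblock ? -EAGAIN : -ERESTARTSYS;
	}
	return 0;	/* caller must mutex_unlock(readlock) when done */
}
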