Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/amba/bus.h9
-rw-r--r--include/linux/apple-gmux.h50
-rw-r--r--include/linux/ata.h4
-rw-r--r--include/linux/atmel_serial.h3
-rw-r--r--include/linux/atomic.h31
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/auto_dev-ioctl.h6
-rw-r--r--include/linux/auto_fs.h10
-rw-r--r--include/linux/bcma/bcma.h3
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h42
-rw-r--r--include/linux/bio.h32
-rw-r--r--include/linux/bitmap.h10
-rw-r--r--include/linux/blk-mq.h16
-rw-r--r--include/linux/blkdev.h30
-rw-r--r--include/linux/bpf.h32
-rw-r--r--include/linux/buffer_head.h10
-rw-r--r--include/linux/bug.h9
-rw-r--r--include/linux/cache.h14
-rw-r--r--include/linux/capability.h2
-rw-r--r--include/linux/ccp.h17
-rw-r--r--include/linux/ceph/ceph_features.h3
-rw-r--r--include/linux/ceph/ceph_fs.h7
-rw-r--r--include/linux/ceph/libceph.h8
-rw-r--r--include/linux/ceph/mon_client.h31
-rw-r--r--include/linux/ceph/osd_client.h15
-rw-r--r--include/linux/cgroup-defs.h46
-rw-r--r--include/linux/cgroup.h49
-rw-r--r--include/linux/clk-provider.h30
-rw-r--r--include/linux/clk/at91_pmc.h12
-rw-r--r--include/linux/clk/renesas.h (renamed from include/linux/clk/shmobile.h)4
-rw-r--r--include/linux/clk/ti.h8
-rw-r--r--include/linux/clkdev.h3
-rw-r--r--include/linux/clockchips.h4
-rw-r--r--include/linux/clocksource.h45
-rw-r--r--include/linux/compaction.h16
-rw-r--r--include/linux/compat.h21
-rw-r--r--include/linux/compiler-clang.h5
-rw-r--r--include/linux/compiler.h17
-rw-r--r--include/linux/configfs.h11
-rw-r--r--include/linux/coresight-pmu.h39
-rw-r--r--include/linux/coresight.h34
-rw-r--r--include/linux/cpu.h27
-rw-r--r--include/linux/cpufreq.h47
-rw-r--r--include/linux/cpuhotplug.h93
-rw-r--r--include/linux/cpumask.h2
-rw-r--r--include/linux/cred.h5
-rw-r--r--include/linux/crypto.h252
-rw-r--r--include/linux/davinci_emac.h4
-rw-r--r--include/linux/dax.h8
-rw-r--r--include/linux/dcache.h39
-rw-r--r--include/linux/debugfs.h8
-rw-r--r--include/linux/device-mapper.h15
-rw-r--r--include/linux/device.h24
-rw-r--r--include/linux/dma-attrs.h1
-rw-r--r--include/linux/dma-buf.h14
-rw-r--r--include/linux/dma-mapping.h29
-rw-r--r--include/linux/dmaengine.h8
-rw-r--r--include/linux/dqblk_qtree.h2
-rw-r--r--include/linux/eeprom_93xx46.h9
-rw-r--r--include/linux/efi.h91
-rw-r--r--include/linux/ethtool.h102
-rw-r--r--include/linux/exportfs.h6
-rw-r--r--include/linux/f2fs_fs.h34
-rw-r--r--include/linux/fault-inject.h5
-rw-r--r--include/linux/fb.h3
-rw-r--r--include/linux/fence.h4
-rw-r--r--include/linux/frame.h23
-rw-r--r--include/linux/freezer.h2
-rw-r--r--include/linux/fs.h37
-rw-r--r--include/linux/fscrypto.h434
-rw-r--r--include/linux/fsl/guts.h105
-rw-r--r--include/linux/fsnotify.h9
-rw-r--r--include/linux/fsnotify_backend.h9
-rw-r--r--include/linux/ftrace.h23
-rw-r--r--include/linux/gfp.h49
-rw-r--r--include/linux/gpio/driver.h39
-rw-r--r--include/linux/hrtimer.h12
-rw-r--r--include/linux/huge_mm.h25
-rw-r--r--include/linux/hyperv.h96
-rw-r--r--include/linux/ieee80211.h26
-rw-r--r--include/linux/if_bridge.h4
-rw-r--r--include/linux/if_team.h1
-rw-r--r--include/linux/igmp.h5
-rw-r--r--include/linux/iio/common/st_sensors.h4
-rw-r--r--include/linux/iio/iio.h8
-rw-r--r--include/linux/ima.h10
-rw-r--r--include/linux/inet_lro.h142
-rw-r--r--include/linux/init.h4
-rw-r--r--include/linux/input/cyttsp.h15
-rw-r--r--include/linux/interrupt.h30
-rw-r--r--include/linux/io.h1
-rw-r--r--include/linux/ioport.h38
-rw-r--r--include/linux/ipv6.h3
-rw-r--r--include/linux/irq.h27
-rw-r--r--include/linux/irqchip/mips-gic.h3
-rw-r--r--include/linux/irqdomain.h46
-rw-r--r--include/linux/iscsi_boot_sysfs.h1
-rw-r--r--include/linux/isdn.h1
-rw-r--r--include/linux/jbd2.h16
-rw-r--r--include/linux/kasan.h37
-rw-r--r--include/linux/kcov.h29
-rw-r--r--include/linux/kernel.h31
-rw-r--r--include/linux/kernfs.h11
-rw-r--r--include/linux/key.h1
-rw-r--r--include/linux/kfifo.h2
-rw-r--r--include/linux/kvm_host.h5
-rw-r--r--include/linux/latencytop.h3
-rw-r--r--include/linux/leds.h8
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/libnvdimm.h8
-rw-r--r--include/linux/lightnvm.h22
-rw-r--r--include/linux/list.h11
-rw-r--r--include/linux/list_bl.h4
-rw-r--r--include/linux/livepatch.h9
-rw-r--r--include/linux/lockdep.h2
-rw-r--r--include/linux/lsm_hooks.h35
-rw-r--r--include/linux/mISDNif.h2
-rw-r--r--include/linux/mbcache.h93
-rw-r--r--include/linux/mbus.h3
-rw-r--r--include/linux/memcontrol.h147
-rw-r--r--include/linux/memory.h14
-rw-r--r--include/linux/memory_hotplug.h7
-rw-r--r--include/linux/mfd/as3711.h3
-rw-r--r--include/linux/mfd/axp20x.h34
-rw-r--r--include/linux/mfd/cros_ec.h2
-rw-r--r--include/linux/mfd/imx25-tsadc.h140
-rw-r--r--include/linux/mfd/max77686-private.h3
-rw-r--r--include/linux/mfd/mt6323/core.h36
-rw-r--r--include/linux/mfd/mt6323/registers.h408
-rw-r--r--include/linux/mfd/mt6397/core.h2
-rw-r--r--include/linux/mfd/palmas.h3
-rw-r--r--include/linux/mfd/rc5t583.h5
-rw-r--r--include/linux/mfd/syscon.h8
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h5
-rw-r--r--include/linux/mfd/tmio.h4
-rw-r--r--include/linux/mfd/tps65086.h117
-rw-r--r--include/linux/mfd/tps65090.h5
-rw-r--r--include/linux/mfd/tps65912.h209
-rw-r--r--include/linux/migrate.h6
-rw-r--r--include/linux/mlx4/device.h3
-rw-r--r--include/linux/mlx4/driver.h3
-rw-r--r--include/linux/mlx5/device.h56
-rw-r--r--include/linux/mlx5/driver.h74
-rw-r--r--include/linux/mlx5/fs.h5
-rw-r--r--include/linux/mlx5/mlx5_ifc.h199
-rw-r--r--include/linux/mlx5/port.h87
-rw-r--r--include/linux/mlx5/qp.h7
-rw-r--r--include/linux/mlx5/vport.h7
-rw-r--r--include/linux/mm.h211
-rw-r--r--include/linux/mm_types.h22
-rw-r--r--include/linux/mman.h6
-rw-r--r--include/linux/mmc/core.h1
-rw-r--r--include/linux/mmc/dw_mmc.h12
-rw-r--r--include/linux/mmc/tmio.h5
-rw-r--r--include/linux/mmdebug.h3
-rw-r--r--include/linux/mmzone.h26
-rw-r--r--include/linux/msi.h9
-rw-r--r--include/linux/mtd/bbm.h1
-rw-r--r--include/linux/mtd/inftl.h1
-rw-r--r--include/linux/mtd/map.h7
-rw-r--r--include/linux/mtd/mtd.h6
-rw-r--r--include/linux/mtd/nand.h10
-rw-r--r--include/linux/mtd/nand_bch.h8
-rw-r--r--include/linux/mtd/nftl.h1
-rw-r--r--include/linux/mtd/spi-nor.h2
-rw-r--r--include/linux/namei.h1
-rw-r--r--include/linux/nd.h7
-rw-r--r--include/linux/net.h1
-rw-r--r--include/linux/netdev_features.h3
-rw-r--r--include/linux/netdevice.h327
-rw-r--r--include/linux/netfilter.h29
-rw-r--r--include/linux/netfilter/nfnetlink.h2
-rw-r--r--include/linux/netfilter/x_tables.h6
-rw-r--r--include/linux/netfilter_arp/arp_tables.h9
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h9
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h9
-rw-r--r--include/linux/netlink.h10
-rw-r--r--include/linux/nfs4.h19
-rw-r--r--include/linux/nmi.h1
-rw-r--r--include/linux/notifier.h2
-rw-r--r--include/linux/nsproxy.h2
-rw-r--r--include/linux/ntb.h10
-rw-r--r--include/linux/nvmem-provider.h5
-rw-r--r--include/linux/of.h18
-rw-r--r--include/linux/of_fdt.h2
-rw-r--r--include/linux/omap-gpmc.h5
-rw-r--r--include/linux/oom.h4
-rw-r--r--include/linux/page-flags-layout.h2
-rw-r--r--include/linux/page-flags.h32
-rw-r--r--include/linux/page_ext.h1
-rw-r--r--include/linux/page_owner.h50
-rw-r--r--include/linux/page_ref.h173
-rw-r--r--include/linux/pagemap.h22
-rw-r--r--include/linux/pci-dma-compat.h147
-rw-r--r--include/linux/pci.h105
-rw-r--r--include/linux/pci_ids.h5
-rw-r--r--include/linux/perf/arm_pmu.h2
-rw-r--r--include/linux/perf_event.h32
-rw-r--r--include/linux/phy.h3
-rw-r--r--include/linux/phy_fixed.h5
-rw-r--r--include/linux/pkeys.h33
-rw-r--r--include/linux/platform_data/ad5761.h44
-rw-r--r--include/linux/platform_data/ad7879.h (renamed from include/linux/spi/ad7879.h)2
-rw-r--r--include/linux/platform_data/adau17x1.h2
-rw-r--r--include/linux/platform_data/at24.h10
-rw-r--r--include/linux/platform_data/brcmfmac-sdio.h135
-rw-r--r--include/linux/platform_data/brcmfmac.h185
-rw-r--r--include/linux/platform_data/microread.h35
-rw-r--r--include/linux/platform_data/mmp_dma.h1
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h1
-rw-r--r--include/linux/platform_data/ntc_thermistor.h1
-rw-r--r--include/linux/platform_data/sa11x0-serial.h8
-rw-r--r--include/linux/platform_data/serial-omap.h2
-rw-r--r--include/linux/pm_clock.h9
-rw-r--r--include/linux/pm_domain.h13
-rw-r--r--include/linux/pm_opp.h27
-rw-r--r--include/linux/pmem.h19
-rw-r--r--include/linux/poison.h4
-rw-r--r--include/linux/poll.h2
-rw-r--r--include/linux/posix-timers.h3
-rw-r--r--include/linux/power/bq24735-charger.h2
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/power_supply.h3
-rw-r--r--include/linux/pps_kernel.h17
-rw-r--r--include/linux/proc_ns.h4
-rw-r--r--include/linux/psci.h3
-rw-r--r--include/linux/pstore_ram.h2
-rw-r--r--include/linux/ptp_clock_kernel.h8
-rw-r--r--include/linux/pxa2xx_ssp.h1
-rw-r--r--include/linux/qed/common_hsi.h36
-rw-r--r--include/linux/qed/eth_common.h171
-rw-r--r--include/linux/qed/qed_chain.h4
-rw-r--r--include/linux/qed/qed_eth_if.h14
-rw-r--r--include/linux/qed/qed_if.h14
-rw-r--r--include/linux/quicklist.h2
-rw-r--r--include/linux/quota.h5
-rw-r--r--include/linux/quotaops.h3
-rw-r--r--include/linux/radix-tree.h28
-rw-r--r--include/linux/random.h1
-rw-r--r--include/linux/rculist.h21
-rw-r--r--include/linux/rcupdate.h6
-rw-r--r--include/linux/regmap.h107
-rw-r--r--include/linux/regulator/act8865.h4
-rw-r--r--include/linux/regulator/driver.h17
-rw-r--r--include/linux/regulator/lp872x.h5
-rw-r--r--include/linux/regulator/machine.h12
-rw-r--r--include/linux/reset-controller.h2
-rw-r--r--include/linux/rfkill-gpio.h37
-rw-r--r--include/linux/rfkill.h18
-rw-r--r--include/linux/rio.h98
-rw-r--r--include/linux/rio_drv.h15
-rw-r--r--include/linux/rio_mport_cdev.h271
-rw-r--r--include/linux/rio_regs.h3
-rw-r--r--include/linux/rmap.h6
-rw-r--r--include/linux/rmi.h359
-rw-r--r--include/linux/rotary_encoder.h17
-rw-r--r--include/linux/rtc.h4
-rw-r--r--include/linux/sched.h62
-rw-r--r--include/linux/sched/sysctl.h25
-rw-r--r--include/linux/scpi_protocol.h3
-rw-r--r--include/linux/security.h16
-rw-r--r--include/linux/serial_8250.h8
-rw-r--r--include/linux/serial_core.h25
-rw-r--r--include/linux/skbuff.h93
-rw-r--r--include/linux/slab.h23
-rw-r--r--include/linux/slab_def.h17
-rw-r--r--include/linux/slub_def.h12
-rw-r--r--include/linux/soc/samsung/exynos-pmu.h24
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h693
-rw-r--r--include/linux/soc/ti/ti-msgmgr.h35
-rw-r--r--include/linux/socket.h7
-rw-r--r--include/linux/spi/eeprom.h2
-rw-r--r--include/linux/spi/spi.h145
-rw-r--r--include/linux/srcu.h19
-rw-r--r--include/linux/stackdepot.h32
-rw-r--r--include/linux/stm.h10
-rw-r--r--include/linux/stmmac.h18
-rw-r--r--include/linux/string.h8
-rw-r--r--include/linux/sunrpc/auth.h7
-rw-r--r--include/linux/sunrpc/clnt.h18
-rw-r--r--include/linux/sunrpc/gss_krb5.h32
-rw-r--r--include/linux/sunrpc/rpc_rdma.h12
-rw-r--r--include/linux/sunrpc/sched.h32
-rw-r--r--include/linux/sunrpc/svc_rdma.h20
-rw-r--r--include/linux/sunrpc/xprt.h22
-rw-r--r--include/linux/sunrpc/xprtmultipath.h69
-rw-r--r--include/linux/sunrpc/xprtrdma.h2
-rw-r--r--include/linux/swait.h172
-rw-r--r--include/linux/syscalls.h6
-rw-r--r--include/linux/tcp.h14
-rw-r--r--include/linux/thermal.h20
-rw-r--r--include/linux/tick.h99
-rw-r--r--include/linux/timekeeper_internal.h2
-rw-r--r--include/linux/timekeeping.h58
-rw-r--r--include/linux/trace_events.h19
-rw-r--r--include/linux/tracepoint-defs.h14
-rw-r--r--include/linux/tracepoint.h17
-rw-r--r--include/linux/tty.h58
-rw-r--r--include/linux/tty_ldisc.h13
-rw-r--r--include/linux/unaligned/access_ok.h24
-rw-r--r--include/linux/usb.h11
-rw-r--r--include/linux/usb/composite.h6
-rw-r--r--include/linux/usb/gadget.h20
-rw-r--r--include/linux/usb/hcd.h5
-rw-r--r--include/linux/usb/msm_hsusb_hw.h1
-rw-r--r--include/linux/usb/musb.h2
-rw-r--r--include/linux/usb/of.h7
-rw-r--r--include/linux/usb/otg-fsm.h15
-rw-r--r--include/linux/usb/renesas_usbhs.h1
-rw-r--r--include/linux/usb/storage.h12
-rw-r--r--include/linux/vfio.h11
-rw-r--r--include/linux/vga_switcheroo.h36
-rw-r--r--include/linux/virtio.h23
-rw-r--r--include/linux/virtio_ring.h35
-rw-r--r--include/linux/vm_event_item.h2
-rw-r--r--include/linux/vmw_vmci_defs.h43
-rw-r--r--include/linux/wait.h2
-rw-r--r--include/linux/watchdog.h43
-rw-r--r--include/linux/writeback.h5
319 files changed, 8037 insertions, 2237 deletions
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 9006c4e75cf7..3d8dcdd1aeae 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -163,4 +163,13 @@ struct amba_device name##_device = { \
#define module_amba_driver(__amba_drv) \
module_driver(__amba_drv, amba_driver_register, amba_driver_unregister)
+/*
+ * builtin_amba_driver() - Helper macro for drivers that don't do anything
+ * special in driver initcall. This eliminates a lot of boilerplate. Each
+ * driver may only use this macro once, and calling it replaces the instance
+ * device_initcall().
+ */
+#define builtin_amba_driver(__amba_drv) \
+ builtin_driver(__amba_drv, amba_driver_register)
+
#endif
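For illustration, a driver that is always built in and needs no module exit path could use the new helper as sketched below; the driver name, IDs and probe body are hypothetical, so treat this as a usage sketch rather than a reference implementation.

/* Hypothetical built-in AMBA driver using the new helper (sketch only). */
static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	return 0;
}

static const struct amba_id foo_ids[] = {
	{ .id = 0x00041234, .mask = 0x000fffff },
	{ 0, 0 },
};

static struct amba_driver foo_driver = {
	.drv		= { .name = "foo" },
	.id_table	= foo_ids,
	.probe		= foo_probe,
};

/* Replaces an explicit device_initcall() that only registered the driver. */
builtin_amba_driver(foo_driver);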
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
new file mode 100644
index 000000000000..b2d32e01dfe4
--- /dev/null
+++ b/include/linux/apple-gmux.h
@@ -0,0 +1,50 @@
+/*
+ * apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro
+ * Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef LINUX_APPLE_GMUX_H
+#define LINUX_APPLE_GMUX_H
+
+#include <linux/acpi.h>
+
+#define GMUX_ACPI_HID "APP000B"
+
+#if IS_ENABLED(CONFIG_APPLE_GMUX)
+
+/**
+ * apple_gmux_present() - detect if gmux is built into the machine
+ *
+ * Drivers may use this to activate quirks specific to dual GPU MacBook Pros
+ * and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
+ *
+ * Return: %true if gmux is present and the kernel was configured
+ * with CONFIG_APPLE_GMUX, %false otherwise.
+ */
+static inline bool apple_gmux_present(void)
+{
+ return acpi_dev_present(GMUX_ACPI_HID);
+}
+
+#else /* !CONFIG_APPLE_GMUX */
+
+static inline bool apple_gmux_present(void)
+{
+ return false;
+}
+
+#endif /* !CONFIG_APPLE_GMUX */
+
+#endif /* LINUX_APPLE_GMUX_H */
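One plausible consumer, sketched with a made-up driver below, is a probe routine that backs off when the gmux owns backlight or GPU switching on these machines; only apple_gmux_present() from the header above is real here.

/* Sketch: skip registering a generic backlight when the gmux handles it. */
static int foo_backlight_probe(struct platform_device *pdev)
{
	if (apple_gmux_present())
		return -ENODEV;	/* let the gmux driver own backlight control */

	/* ... normal registration path ... */
	return 0;
}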
diff --git a/include/linux/ata.h b/include/linux/ata.h
index d2992bfa1706..c1a2f345cbe6 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -487,8 +487,8 @@ enum ata_tf_protocols {
};
enum ata_ioctls {
- ATA_IOC_GET_IO32 = 0x309,
- ATA_IOC_SET_IO32 = 0x324,
+ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */
+ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */
};
/* core structures */
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index ee696d7e8a43..5a4d664af87a 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -119,7 +119,8 @@
#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
-#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
+#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register for USART */
+#define ATMEL_UA_RTOR 0x28 /* Receiver Time-out Register for UART */
#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */
#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 301de78d65f7..df4f369254c0 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -34,20 +34,29 @@
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
+ *
+ * Besides, if an arch has a special barrier for acquire/release, it could
+ * implement its own __atomic_op_* and use the same framework for building
+ * variants
*/
+#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
smp_mb__after_atomic(); \
__ret; \
})
+#endif
+#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
smp_mb__before_atomic(); \
op##_relaxed(args); \
})
+#endif
+#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
@@ -56,6 +65,7 @@
smp_mb__after_atomic(); \
__ret; \
})
+#endif
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
@@ -548,6 +558,27 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
+/**
+ * fetch_or - perform *ptr |= mask and return old value of *ptr
+ * @ptr: pointer to value
+ * @mask: mask to OR on the value
+ *
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#ifndef fetch_or
+#define fetch_or(ptr, mask) \
+({ typeof(*(ptr)) __old, __val = *(ptr); \
+ for (;;) { \
+ __old = cmpxchg((ptr), __val, __val | (mask)); \
+ if (__old == __val) \
+ break; \
+ __val = __old; \
+ } \
+ __old; \
+})
+#endif
+
+
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
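Because fetch_or() returns the old value, a caller can set a bit and learn in the same atomic step whether it was already set. A minimal sketch with an illustrative flag word:

#define FOO_PENDING	0x1UL

static unsigned long foo_flags;

/* Returns true if this call was the one that set FOO_PENDING. */
static bool foo_mark_pending(void)
{
	return !(fetch_or(&foo_flags, FOO_PENDING) & FOO_PENDING);
}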
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b40ed5df5542..e38e3fc13ea8 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -109,6 +109,10 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall);
/* maximized args number that audit_socketcall can process */
#define AUDITSC_ARGS 6
+/* bit values for ->signal->audit_tty */
+#define AUDIT_TTY_ENABLE BIT(0)
+#define AUDIT_TTY_LOG_PASSWD BIT(1)
+
struct filename;
extern void audit_log_session_info(struct audit_buffer *ab);
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 850f39b33e74..7caaf298f539 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -11,12 +11,7 @@
#define _LINUX_AUTO_DEV_IOCTL_H
#include <linux/auto_fs.h>
-
-#ifdef __KERNEL__
#include <linux/string.h>
-#else
-#include <string.h>
-#endif /* __KERNEL__ */
#define AUTOFS_DEVICE_NAME "autofs"
@@ -125,7 +120,6 @@ static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
in->size = sizeof(struct autofs_dev_ioctl);
in->ioctlfd = -1;
- return;
}
/*
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index fcd704d354c4..b4066bb89083 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -1,14 +1,10 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * linux/include/linux/auto_fs.h
- *
- * Copyright 1997 Transmeta Corporation - All Rights Reserved
+/*
+ * Copyright 1997 Transmeta Corporation - All Rights Reserved
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
+ */
#ifndef _LINUX_AUTO_FS_H
#define _LINUX_AUTO_FS_H
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 3feb1b2d75d8..0367c63f5960 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -151,6 +151,8 @@ struct bcma_host_ops {
#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */
#define BCMA_CORE_USB30_DEV 0x83D
#define BCMA_CORE_ARM_CR4 0x83E
+#define BCMA_CORE_GCI 0x840
+#define BCMA_CORE_CMEM 0x846 /* CNDS DDR2/3 memory controller */
#define BCMA_CORE_ARM_CA7 0x847
#define BCMA_CORE_SYS_MEM 0x849
#define BCMA_CORE_DEFAULT 0xFFF
@@ -199,6 +201,7 @@ struct bcma_host_ops {
#define BCMA_PKG_ID_BCM4707 1
#define BCMA_PKG_ID_BCM4708 2
#define BCMA_PKG_ID_BCM4709 0
+#define BCMA_CHIP_ID_BCM47094 53030
#define BCMA_CHIP_ID_BCM53018 53018
/* Board types (on PCI usually equals to the subsystem dev id) */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index db51a6ffb7d6..846513c73606 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -217,6 +217,11 @@
#define BCMA_CC_CLKDIV_JTAG_SHIFT 8
#define BCMA_CC_CLKDIV_UART 0x000000FF
#define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */
+#define BCMA_CC_CAP_EXT_SECI_PRESENT 0x00000001
+#define BCMA_CC_CAP_EXT_GSIO_PRESENT 0x00000002
+#define BCMA_CC_CAP_EXT_GCI_PRESENT 0x00000004
+#define BCMA_CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /* UART present */
+#define BCMA_CC_CAP_EXT_AOB_PRESENT 0x00000040
#define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */
#define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */
#define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */
@@ -351,12 +356,12 @@
#define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */
#define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */
#define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */
-#define BCMA_CC_CHIPCTL_ADDR 0x0650
-#define BCMA_CC_CHIPCTL_DATA 0x0654
-#define BCMA_CC_REGCTL_ADDR 0x0658
-#define BCMA_CC_REGCTL_DATA 0x065C
-#define BCMA_CC_PLLCTL_ADDR 0x0660
-#define BCMA_CC_PLLCTL_DATA 0x0664
+#define BCMA_CC_PMU_CHIPCTL_ADDR 0x0650
+#define BCMA_CC_PMU_CHIPCTL_DATA 0x0654
+#define BCMA_CC_PMU_REGCTL_ADDR 0x0658
+#define BCMA_CC_PMU_REGCTL_DATA 0x065C
+#define BCMA_CC_PMU_PLLCTL_ADDR 0x0660
+#define BCMA_CC_PMU_PLLCTL_DATA 0x0664
#define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */
#define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */
#define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF
@@ -566,17 +571,16 @@
* Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
*/
struct bcma_chipcommon_pmu {
+ struct bcma_device *core; /* Can be separated core or just ChipCommon one */
u8 rev; /* PMU revision */
u32 crystalfreq; /* The active crystal frequency (in kHz) */
};
-#ifdef CONFIG_BCMA_DRIVER_MIPS
+#ifdef CONFIG_BCMA_PFLASH
struct bcma_pflash {
bool present;
- u8 buswidth;
- u32 window;
- u32 window_size;
};
+#endif
#ifdef CONFIG_BCMA_SFLASH
struct mtd_info;
@@ -600,6 +604,7 @@ struct bcma_nflash {
};
#endif
+#ifdef CONFIG_BCMA_DRIVER_MIPS
struct bcma_serial_port {
void *regs;
unsigned long clockspeed;
@@ -619,8 +624,9 @@ struct bcma_drv_cc {
/* Fast Powerup Delay constant */
u16 fast_pwrup_delay;
struct bcma_chipcommon_pmu pmu;
-#ifdef CONFIG_BCMA_DRIVER_MIPS
+#ifdef CONFIG_BCMA_PFLASH
struct bcma_pflash pflash;
+#endif
#ifdef CONFIG_BCMA_SFLASH
struct bcma_sflash sflash;
#endif
@@ -628,6 +634,7 @@ struct bcma_drv_cc {
struct bcma_nflash nflash;
#endif
+#ifdef CONFIG_BCMA_DRIVER_MIPS
int nr_serial_ports;
struct bcma_serial_port serial_ports[4];
#endif /* CONFIG_BCMA_DRIVER_MIPS */
@@ -660,6 +667,19 @@ struct bcma_drv_cc_b {
#define bcma_cc_maskset32(cc, offset, mask, set) \
bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
+/* PMU registers access */
+#define bcma_pmu_read32(cc, offset) \
+ bcma_read32((cc)->pmu.core, offset)
+#define bcma_pmu_write32(cc, offset, val) \
+ bcma_write32((cc)->pmu.core, offset, val)
+
+#define bcma_pmu_mask32(cc, offset, mask) \
+ bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask))
+#define bcma_pmu_set32(cc, offset, set) \
+ bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set))
+#define bcma_pmu_maskset32(cc, offset, mask, set) \
+ bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set))
+
extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5349e6816cbb..88bc64f00bb5 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -310,6 +310,38 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
bio->bi_flags &= ~(1U << bit);
}
+static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ *bv = bio_iovec(bio);
+}
+
+static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ struct bvec_iter iter = bio->bi_iter;
+ int idx;
+
+ if (unlikely(!bio_multiple_segments(bio))) {
+ *bv = bio_iovec(bio);
+ return;
+ }
+
+ bio_advance_iter(bio, &iter, iter.bi_size);
+
+ if (!iter.bi_bvec_done)
+ idx = iter.bi_idx - 1;
+ else /* in the middle of bvec */
+ idx = iter.bi_idx;
+
+ *bv = bio->bi_io_vec[idx];
+
+ /*
+ * iter.bi_bvec_done records actual length of the last bvec
+ * if this bio ends in the middle of one io vector
+ */
+ if (iter.bi_bvec_done)
+ bv->bv_len = iter.bi_bvec_done;
+}
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 9653fdb76a42..e9b0b9ab07e5 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -59,6 +59,8 @@
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
+ * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
*/
/*
@@ -163,6 +165,14 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
+extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
+ unsigned int nbits,
+ const u32 *buf,
+ unsigned int nwords);
+extern unsigned int bitmap_to_u32array(u32 *buf,
+ unsigned int nwords,
+ const unsigned long *bitmap,
+ unsigned int nbits);
#ifdef __BIG_ENDIAN
extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
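A minimal sketch of the new conversion helpers, assuming a bitmap that has to be exchanged with a fixed-width 32-bit word array (for example across a user ABI); the sizes and names are illustrative only.

#define FOO_NBITS	48

static DECLARE_BITMAP(foo_mask, FOO_NBITS);
static u32 foo_words[DIV_ROUND_UP(FOO_NBITS, 32)];

static void foo_sync_words(void)
{
	/* kernel bitmap -> 32-bit words, independent of BITS_PER_LONG */
	bitmap_to_u32array(foo_words, ARRAY_SIZE(foo_words),
			   foo_mask, FOO_NBITS);
	/* ... and back again after foo_words has been updated ... */
	bitmap_from_u32array(foo_mask, FOO_NBITS,
			     foo_words, ARRAY_SIZE(foo_words));
}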
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 7fc9296b5742..9ac9799b702b 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -244,6 +244,8 @@ void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
+
/*
* Driver command data is immediately after the request. So subtract request
* size to get back to the original request, add request size to get the PDU.
@@ -261,22 +263,8 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
for ((i) = 0; (i) < (q)->nr_hw_queues && \
({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
-#define queue_for_each_ctx(q, ctx, i) \
- for ((i) = 0; (i) < (q)->nr_queues && \
- ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
-
#define hctx_for_each_ctx(hctx, ctx, i) \
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
-#define blk_ctx_sum(q, sum) \
-({ \
- struct blk_mq_ctx *__x; \
- unsigned int __ret = 0, __i; \
- \
- queue_for_each_ctx((q), __x, __i) \
- __ret += sum; \
- __ret; \
-})
-
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4571ef1a12a9..7e5d7e018bea 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -499,7 +499,8 @@ struct request_queue {
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
- (1 << QUEUE_FLAG_SAME_COMP))
+ (1 << QUEUE_FLAG_SAME_COMP) | \
+ (1 << QUEUE_FLAG_POLL))
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
@@ -895,7 +896,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
struct request_queue *q = rq->q;
- if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+ if (unlikely(rq->cmd_type != REQ_TYPE_FS))
return q->limits.max_hw_sectors;
if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
@@ -1029,6 +1030,7 @@ extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
+extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
struct device *dev) {}
@@ -1039,6 +1041,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q)
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
+extern inline void blk_set_runtime_active(struct request_queue *q) {}
#endif
/*
@@ -1372,6 +1375,13 @@ static inline void put_dev_sector(Sector p)
page_cache_release(p.v);
}
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ return offset ||
+ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
/*
* Check if adding a bio_vec after bprv with offset would create a gap in
* the SG list. Most drivers don't care about this, but some do.
@@ -1381,18 +1391,22 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
{
if (!queue_virt_boundary(q))
return false;
- return offset ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+ return __bvec_gap_to_prev(q, bprv, offset);
}
static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
struct bio *next)
{
- if (!bio_has_data(prev))
- return false;
+ if (bio_has_data(prev) && queue_virt_boundary(q)) {
+ struct bio_vec pb, nb;
+
+ bio_get_last_bvec(prev, &pb);
+ bio_get_first_bvec(next, &nb);
- return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
- next->bi_io_vec[0].bv_offset);
+ return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+ }
+
+ return false;
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 83d1926c61e4..21ee41b92e8a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
#include <uapi/linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/file.h>
+#include <linux/percpu.h>
struct bpf_map;
@@ -36,6 +37,7 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
+ u32 map_flags;
u32 pages;
struct user_struct *user;
const struct bpf_map_ops *ops;
@@ -65,6 +67,7 @@ enum bpf_arg_type {
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
+ ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
@@ -151,6 +154,7 @@ struct bpf_array {
union {
char value[0] __aligned(8);
void *ptrs[0] __aligned(8);
+ void __percpu *pptrs[0] __aligned(8);
};
};
#define MAX_TAIL_CALL_CNT 32
@@ -161,6 +165,8 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
#ifdef CONFIG_BPF_SYSCALL
+DECLARE_PER_CPU(int, bpf_prog_active);
+
void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
@@ -173,6 +179,7 @@ struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
+int bpf_map_precharge_memlock(u32 pages);
extern int sysctl_unprivileged_bpf_disabled;
@@ -182,6 +189,30 @@ int bpf_prog_new_fd(struct bpf_prog *prog);
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname);
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
+ u64 flags);
+int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
+ u64 flags);
+int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+
+/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
+ * forced to use 'long' read/writes to try to atomically copy long counters.
+ * Best-effort only. No barriers here, since it _will_ race with concurrent
+ * updates from BPF programs. Called from bpf syscall and mostly used with
+ * size 8 or 16 bytes, so ask compiler to inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+ const long *lsrc = src;
+ long *ldst = dst;
+
+ size /= sizeof(long);
+ while (size--)
+ *ldst++ = *lsrc++;
+}
+
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
#else
@@ -213,6 +244,7 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
+extern const struct bpf_func_proto bpf_get_stackid_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
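As a rough sketch of how bpf_long_memcpy() is meant to be used (the helper below is hypothetical, not the actual syscall code): gathering the per-CPU copies of a map value into one flat buffer, with the size assumed to be 8-byte aligned as the comment requires.

static void foo_copy_percpu_value(void *out, void __percpu *pptr, u32 size)
{
	int cpu;
	u32 off = 0;

	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(out + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
}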
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 89d9aa9e79bf..c67f052cc5e5 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -82,15 +82,15 @@ struct buffer_head {
* and buffer_foo() functions.
*/
#define BUFFER_FNS(bit, name) \
-static inline void set_buffer_##name(struct buffer_head *bh) \
+static __always_inline void set_buffer_##name(struct buffer_head *bh) \
{ \
set_bit(BH_##bit, &(bh)->b_state); \
} \
-static inline void clear_buffer_##name(struct buffer_head *bh) \
+static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{ \
clear_bit(BH_##bit, &(bh)->b_state); \
} \
-static inline int buffer_##name(const struct buffer_head *bh) \
+static __always_inline int buffer_##name(const struct buffer_head *bh) \
{ \
return test_bit(BH_##bit, &(bh)->b_state); \
}
@@ -99,11 +99,11 @@ static inline int buffer_##name(const struct buffer_head *bh) \
* test_set_buffer_foo() and test_clear_buffer_foo()
*/
#define TAS_BUFFER_FNS(bit, name) \
-static inline int test_set_buffer_##name(struct buffer_head *bh) \
+static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{ \
return test_and_set_bit(BH_##bit, &(bh)->b_state); \
} \
-static inline int test_clear_buffer_##name(struct buffer_head *bh) \
+static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{ \
return test_and_clear_bit(BH_##bit, &(bh)->b_state); \
} \
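For reference, after this change an invocation such as BUFFER_FNS(Dirty, dirty) expands to always-inlined accessors along these lines:

static __always_inline void set_buffer_dirty(struct buffer_head *bh)
{
	set_bit(BH_Dirty, &bh->b_state);
}

static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
{
	clear_bit(BH_Dirty, &bh->b_state);
}

static __always_inline int buffer_dirty(const struct buffer_head *bh)
{
	return test_bit(BH_Dirty, &bh->b_state);
}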
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 7f4818673c41..e51b0709e78d 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -20,6 +20,7 @@ struct pt_regs;
#define BUILD_BUG_ON_MSG(cond, msg) (0)
#define BUILD_BUG_ON(condition) (0)
#define BUILD_BUG() (0)
+#define MAYBE_BUILD_BUG_ON(cond) (0)
#else /* __CHECKER__ */
/* Force a compilation error if a constant expression is not a power of 2 */
@@ -83,6 +84,14 @@ struct pt_regs;
*/
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
+#define MAYBE_BUILD_BUG_ON(cond) \
+ do { \
+ if (__builtin_constant_p((cond))) \
+ BUILD_BUG_ON(cond); \
+ else \
+ BUG_ON(cond); \
+ } while (0)
+
#endif /* __CHECKER__ */
#ifdef CONFIG_GENERIC_BUG
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 17e7e82d2aa7..1be04f8c563a 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -12,10 +12,24 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
+/*
+ * __read_mostly is used to keep rarely changing variables out of frequently
+ * updated cachelines. If an architecture doesn't support it, ignore the
+ * hint.
+ */
#ifndef __read_mostly
#define __read_mostly
#endif
+/*
+ * __ro_after_init is used to mark things that are read-only after init (i.e.
+ * after mark_rodata_ro() has been called). These are effectively read-only,
+ * but may get written to during init, so can't live in .rodata (via "const").
+ */
+#ifndef __ro_after_init
+#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+#endif
+
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
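A short sketch of the new annotation: a variable that is written exactly once during boot and treated as constant afterwards can now be moved out of plain writable data. The names below are illustrative.

static unsigned long foo_feature_mask __ro_after_init;

static int __init foo_setup(void)
{
	foo_feature_mask = 0x3;	/* last legitimate write, before rodata is sealed */
	return 0;
}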
diff --git a/include/linux/capability.h b/include/linux/capability.h
index f314275d4e3f..00690ff92edf 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -40,8 +40,6 @@ struct inode;
struct dentry;
struct user_namespace;
-struct user_namespace *current_user_ns(void);
-
extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_init_eff_set;
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 7f437036baa4..915af3095b39 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -33,6 +33,18 @@ struct ccp_cmd;
*/
int ccp_present(void);
+#define CCP_VSIZE 16
+#define CCP_VMASK ((unsigned int)((1 << CCP_VSIZE) - 1))
+#define CCP_VERSION(v, r) ((unsigned int)((v << CCP_VSIZE) \
+ | (r & CCP_VMASK)))
+
+/**
+ * ccp_version - get the version of the CCP
+ *
+ * Returns a positive version number, or zero if no CCP
+ */
+unsigned int ccp_version(void);
+
/**
* ccp_enqueue_cmd - queue an operation for processing by the CCP
*
@@ -65,6 +77,11 @@ static inline int ccp_present(void)
return -ENODEV;
}
+static inline unsigned int ccp_version(void)
+{
+ return 0;
+}
+
static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
return -ENODEV;
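A caller can combine the two additions to gate features on the reported CCP version; the version threshold below is purely illustrative.

static bool foo_ccp_new_enough(void)
{
	unsigned int ver = ccp_version();

	/* zero means no CCP is available at all */
	return ver != 0 && ver >= CCP_VERSION(3, 0);
}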
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index c1ef6f14e7be..ae2f66833762 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -75,6 +75,7 @@
#define CEPH_FEATURE_CRUSH_TUNABLES5 (1ULL<<58) /* chooseleaf stable mode */
// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */
+#define CEPH_FEATURE_FS_FILE_LAYOUT_V2 (1ULL<<58) /* file_layout_t */
/*
* The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -104,6 +105,7 @@ static inline u64 ceph_sanitize_features(u64 features)
*/
#define CEPH_FEATURES_SUPPORTED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_SUBSCRIBE2 | \
CEPH_FEATURE_RECONNECT_SEQ | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
@@ -126,6 +128,7 @@ static inline u64 ceph_sanitize_features(u64 features)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_SUBSCRIBE2 | \
CEPH_FEATURE_RECONNECT_SEQ | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index d7d072a25c27..37f28bf55ce4 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -198,8 +198,8 @@ struct ceph_client_mount {
#define CEPH_SUBSCRIBE_ONETIME 1 /* i want only 1 update after have */
struct ceph_mon_subscribe_item {
- __le64 have_version; __le64 have;
- __u8 onetime;
+ __le64 start;
+ __u8 flags;
} __attribute__ ((packed));
struct ceph_mon_subscribe_ack {
@@ -376,7 +376,8 @@ union ceph_mds_request_args {
__le32 stripe_count; /* ... */
__le32 object_size;
__le32 file_replication;
- __le32 unused; /* used to be preferred osd */
+ __le32 mask; /* CEPH_CAP_* */
+ __le32 old_size;
} __attribute__ ((packed)) open;
struct {
__le32 flags;
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 3e3799cdc6e6..e7975e4681e1 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -47,7 +47,6 @@ struct ceph_options {
unsigned long mount_timeout; /* jiffies */
unsigned long osd_idle_ttl; /* jiffies */
unsigned long osd_keepalive_timeout; /* jiffies */
- unsigned long monc_ping_timeout; /* jiffies */
/*
* any type that can't be simply compared or doesn't need need
@@ -68,7 +67,12 @@ struct ceph_options {
#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
-#define CEPH_MONC_PING_TIMEOUT_DEFAULT msecs_to_jiffies(30 * 1000)
+
+#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000)
+#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000)
+#define CEPH_MONC_PING_TIMEOUT msecs_to_jiffies(30 * 1000)
+#define CEPH_MONC_HUNT_BACKOFF 2
+#define CEPH_MONC_HUNT_MAX_MULT 10
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index 81810dc21f06..e230e7ed60d3 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -68,18 +68,24 @@ struct ceph_mon_client {
bool hunting;
int cur_mon; /* last monitor i contacted */
- unsigned long sub_sent, sub_renew_after;
+ unsigned long sub_renew_after;
+ unsigned long sub_renew_sent;
struct ceph_connection con;
+ bool had_a_connection;
+ int hunt_mult; /* [1..CEPH_MONC_HUNT_MAX_MULT] */
+
/* pending generic requests */
struct rb_root generic_request_tree;
int num_generic_requests;
u64 last_tid;
- /* mds/osd map */
- int want_mdsmap;
- int want_next_osdmap; /* 1 = want, 2 = want+asked */
- u32 have_osdmap, have_mdsmap;
+ /* subs, indexed with CEPH_SUB_* */
+ struct {
+ struct ceph_mon_subscribe_item item;
+ bool want;
+ u32 have; /* epoch */
+ } subs[3];
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_file;
@@ -93,14 +99,23 @@ extern int ceph_monmap_contains(struct ceph_monmap *m,
extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
extern void ceph_monc_stop(struct ceph_mon_client *monc);
+enum {
+ CEPH_SUB_MDSMAP = 0,
+ CEPH_SUB_MONMAP,
+ CEPH_SUB_OSDMAP,
+};
+
+extern const char *ceph_sub_str[];
+
/*
* The model here is to indicate that we need a new map of at least
- * epoch @want, and also call in when we receive a map. We will
+ * epoch @epoch, and also call in when we receive a map. We will
* periodically rerequest the map from the monitor cluster until we
* get what we want.
*/
-extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have);
-extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have);
+bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
+ bool continuous);
+void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch);
extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
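A minimal sketch of the new subscription flow, assuming a hypothetical caller that tracks osdmap epochs: declare interest in maps newer than what it has, then acknowledge each map as it is decoded.

static void foo_want_newer_osdmap(struct ceph_mon_client *monc, u32 epoch)
{
	/* remembered in monc->subs[CEPH_SUB_OSDMAP]; resent until satisfied */
	ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, epoch, false);
}

static void foo_osdmap_arrived(struct ceph_mon_client *monc, u32 epoch)
{
	ceph_monc_got_map(monc, CEPH_SUB_OSDMAP, epoch);
}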
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 7506b485bb6d..4343df806710 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -43,7 +43,8 @@ struct ceph_osd {
};
-#define CEPH_OSD_MAX_OP 3
+#define CEPH_OSD_SLAB_OPS 2
+#define CEPH_OSD_MAX_OPS 16
enum ceph_osd_data_type {
CEPH_OSD_DATA_TYPE_NONE = 0,
@@ -77,7 +78,10 @@ struct ceph_osd_data {
struct ceph_osd_req_op {
u16 op; /* CEPH_OSD_OP_* */
u32 flags; /* CEPH_OSD_OP_FLAG_* */
- u32 payload_len;
+ u32 indata_len; /* request */
+ u32 outdata_len; /* reply */
+ s32 rval;
+
union {
struct ceph_osd_data raw_data_in;
struct {
@@ -136,7 +140,6 @@ struct ceph_osd_request {
/* request osd ops array */
unsigned int r_num_ops;
- struct ceph_osd_req_op r_ops[CEPH_OSD_MAX_OP];
/* these are updated on each send */
__le32 *r_request_osdmap_epoch;
@@ -148,8 +151,6 @@ struct ceph_osd_request {
struct ceph_eversion *r_request_reassert_version;
int r_result;
- int r_reply_op_len[CEPH_OSD_MAX_OP];
- s32 r_reply_op_result[CEPH_OSD_MAX_OP];
int r_got_reply;
int r_linger;
@@ -174,6 +175,8 @@ struct ceph_osd_request {
unsigned long r_stamp; /* send OR check time */
struct ceph_snap_context *r_snapc; /* snap context for writes */
+
+ struct ceph_osd_req_op r_ops[];
};
struct ceph_request_redirect {
@@ -263,6 +266,8 @@ extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
u64 truncate_size, u32 truncate_seq);
extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
unsigned int which, u64 length);
+extern void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
+ unsigned int which, u64 offset_inc);
extern struct ceph_osd_data *osd_req_op_extent_osd_data(
struct ceph_osd_request *osd_req,
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 789471dba6fb..3e39ae5bc799 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -45,6 +45,7 @@ enum {
CSS_NO_REF = (1 << 0), /* no reference counting for this css */
CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
+ CSS_VISIBLE = (1 << 3), /* css is visible to userland */
};
/* bits in struct cgroup flags field */
@@ -190,12 +191,13 @@ struct css_set {
/*
* If this cset is acting as the source of migration the following
- * two fields are set. mg_src_cgrp is the source cgroup of the
- * on-going migration and mg_dst_cset is the destination cset the
- * target tasks on this cset should be migrated to. Protected by
- * cgroup_mutex.
+ * two fields are set. mg_src_cgrp and mg_dst_cgrp are
+ * respectively the source and destination cgroups of the on-going
+ * migration. mg_dst_cset is the destination cset the target tasks
+ * on this cset should be migrated to. Protected by cgroup_mutex.
*/
struct cgroup *mg_src_cgrp;
+ struct cgroup *mg_dst_cgrp;
struct css_set *mg_dst_cset;
/*
@@ -210,6 +212,9 @@ struct css_set {
/* all css_task_iters currently walking this cset */
struct list_head task_iters;
+ /* dead and being drained, ignore for migration */
+ bool dead;
+
/* For RCU-protected deletion */
struct rcu_head rcu_head;
};
@@ -253,13 +258,14 @@ struct cgroup {
/*
* The bitmask of subsystems enabled on the child cgroups.
* ->subtree_control is the one configured through
- * "cgroup.subtree_control" while ->child_subsys_mask is the
- * effective one which may have more subsystems enabled.
- * Controller knobs are made available iff it's enabled in
- * ->subtree_control.
+ * "cgroup.subtree_control" while ->child_ss_mask is the effective
+ * one which may have more subsystems enabled. Controller knobs
+ * are made available iff it's enabled in ->subtree_control.
*/
- unsigned int subtree_control;
- unsigned int child_subsys_mask;
+ u16 subtree_control;
+ u16 subtree_ss_mask;
+ u16 old_subtree_control;
+ u16 old_subtree_ss_mask;
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
@@ -434,7 +440,6 @@ struct cgroup_subsys {
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
- void (*css_e_css_changed)(struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
@@ -446,7 +451,20 @@ struct cgroup_subsys {
void (*free)(struct task_struct *task);
void (*bind)(struct cgroup_subsys_state *root_css);
- int early_init;
+ bool early_init:1;
+
+ /*
+ * If %true, the controller, on the default hierarchy, doesn't show
+ * up in "cgroup.controllers" or "cgroup.subtree_control", is
+ * implicitly enabled on all cgroups on the default hierarchy, and
+ * bypasses the "no internal process" constraint. This is for
+ * utility type controllers which is transparent to userland.
+ *
+ * An implicit controller can be stolen from the default hierarchy
+ * anytime and thus must be okay with offline csses from previous
+ * hierarchies coexisting with csses for the current one.
+ */
+ bool implicit_on_dfl:1;
/*
* If %false, this subsystem is properly hierarchical -
@@ -460,8 +478,8 @@ struct cgroup_subsys {
* cases. Eventually, all subsystems will be made properly
* hierarchical and this will go away.
*/
- bool broken_hierarchy;
- bool warned_broken_hierarchy;
+ bool broken_hierarchy:1;
+ bool warned_broken_hierarchy:1;
/* the following two fields are initialized automtically during boot */
int id;
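A controller opting into the new flag would do so in its cgroup_subsys definition, roughly as below; the controller and its callbacks are hypothetical, and only the bitfields come from this patch.

struct cgroup_subsys foo_cgrp_subsys = {
	.css_alloc	 = foo_css_alloc,	/* assumed to exist */
	.css_free	 = foo_css_free,	/* assumed to exist */
	.early_init	 = false,
	.implicit_on_dfl = true,	/* no visible knobs, enabled on every v2 cgroup */
};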
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 2162dca88dc0..a20320c666fd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -17,6 +17,11 @@
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
+#include <linux/nsproxy.h>
+#include <linux/types.h>
+#include <linux/ns_common.h>
+#include <linux/nsproxy.h>
+#include <linux/user_namespace.h>
#include <linux/cgroup-defs.h>
@@ -611,4 +616,48 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_CGROUP_DATA */
+struct cgroup_namespace {
+ atomic_t count;
+ struct ns_common ns;
+ struct user_namespace *user_ns;
+ struct css_set *root_cset;
+};
+
+extern struct cgroup_namespace init_cgroup_ns;
+
+#ifdef CONFIG_CGROUPS
+
+void free_cgroup_ns(struct cgroup_namespace *ns);
+
+struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
+ struct user_namespace *user_ns,
+ struct cgroup_namespace *old_ns);
+
+char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+ struct cgroup_namespace *ns);
+
+#else /* !CONFIG_CGROUPS */
+
+static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline struct cgroup_namespace *
+copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
+ struct cgroup_namespace *old_ns)
+{
+ return old_ns;
+}
+
+#endif /* !CONFIG_CGROUPS */
+
+static inline void get_cgroup_ns(struct cgroup_namespace *ns)
+{
+ if (ns)
+ atomic_inc(&ns->count);
+}
+
+static inline void put_cgroup_ns(struct cgroup_namespace *ns)
+{
+ if (ns && atomic_dec_and_test(&ns->count))
+ free_cgroup_ns(ns);
+}
+
#endif /* _LINUX_CGROUP_H */
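The namespace refcounting helpers follow the usual get/put pattern; a minimal, hypothetical sketch of holding a reference across an asynchronous operation:

static struct cgroup_namespace *foo_hold_ns(struct cgroup_namespace *ns)
{
	get_cgroup_ns(ns);	/* tolerates NULL */
	return ns;
}

static void foo_drop_ns(struct cgroup_namespace *ns)
{
	put_cgroup_ns(ns);	/* frees the namespace on the final put */
}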
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 1143e38555a4..da95258127aa 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -25,7 +25,7 @@
#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
-#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */
+#define CLK_IS_ROOT BIT(4) /* Deprecated: Don't use */
#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
@@ -276,6 +276,8 @@ struct clk_fixed_rate {
u8 flags;
};
+#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+
extern const struct clk_ops clk_fixed_rate_ops;
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
@@ -283,7 +285,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy);
-
+void clk_unregister_fixed_rate(struct clk *clk);
void of_fixed_clk_setup(struct device_node *np);
/**
@@ -314,6 +316,8 @@ struct clk_gate {
spinlock_t *lock;
};
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
#define CLK_GATE_SET_TO_DISABLE BIT(0)
#define CLK_GATE_HIWORD_MASK BIT(1)
@@ -376,6 +380,8 @@ struct clk_divider {
spinlock_t *lock;
};
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
#define CLK_DIVIDER_ONE_BASED BIT(0)
#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
#define CLK_DIVIDER_ALLOW_ZERO BIT(2)
@@ -385,6 +391,7 @@ struct clk_divider {
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
extern const struct clk_ops clk_divider_ops;
+extern const struct clk_ops clk_divider_ro_ops;
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
unsigned int val, const struct clk_div_table *table,
@@ -440,6 +447,8 @@ struct clk_mux {
spinlock_t *lock;
};
+#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
+
#define CLK_MUX_INDEX_ONE BIT(0)
#define CLK_MUX_INDEX_BIT BIT(1)
#define CLK_MUX_HIWORD_MASK BIT(2)
@@ -483,10 +492,13 @@ struct clk_fixed_factor {
unsigned int div;
};
+#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
+
extern const struct clk_ops clk_fixed_factor_ops;
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
+void clk_unregister_fixed_factor(struct clk *clk);
/**
* struct clk_fractional_divider - adjustable fractional divider clock
@@ -514,6 +526,8 @@ struct clk_fractional_divider {
spinlock_t *lock;
};
+#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
+
extern const struct clk_ops clk_fractional_divider_ops;
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
@@ -550,6 +564,8 @@ struct clk_multiplier {
spinlock_t *lock;
};
+#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
+
#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
@@ -579,6 +595,8 @@ struct clk_composite {
const struct clk_ops *gate_ops;
};
+#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
+
struct clk *clk_register_composite(struct device *dev, const char *name,
const char * const *parent_names, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
@@ -601,13 +619,13 @@ struct clk_gpio {
struct gpio_desc *gpiod;
};
+#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
+
extern const struct clk_ops clk_gpio_gate_ops;
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
const char *parent_name, unsigned gpio, bool active_low,
unsigned long flags);
-void of_gpio_clk_gate_setup(struct device_node *node);
-
/**
* struct clk_gpio_mux - gpio controlled clock multiplexer
*
@@ -623,8 +641,6 @@ struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents, unsigned gpio,
bool active_low, unsigned long flags);
-void of_gpio_mux_clk_setup(struct device_node *node);
-
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
@@ -700,7 +716,7 @@ void of_clk_del_provider(struct device_node *np);
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
-int of_clk_get_parent_count(struct device_node *np);
+unsigned int of_clk_get_parent_count(struct device_node *np);
int of_clk_parent_fill(struct device_node *np, const char **parents,
unsigned int size);
const char *of_clk_get_parent_name(struct device_node *np, int index);
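The new to_clk_*() helpers replace open-coded container_of() in the basic clock type callbacks; below is a sketch of a gate enable hook using to_clk_gate(), with polarity and locking ignored for brevity and the function itself hypothetical.

static int foo_gate_enable(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);
	u32 val = readl(gate->reg);

	val |= BIT(gate->bit_idx);
	writel(val, gate->reg);

	return 0;
}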
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 1e6932222e11..17f413bbbedf 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -16,18 +16,6 @@
#ifndef AT91_PMC_H
#define AT91_PMC_H
-#ifndef __ASSEMBLY__
-extern void __iomem *at91_pmc_base;
-
-#define at91_pmc_read(field) \
- readl_relaxed(at91_pmc_base + field)
-
-#define at91_pmc_write(field, value) \
- writel_relaxed(value, at91_pmc_base + field)
-#else
-.extern at91_pmc_base
-#endif
-
#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/renesas.h
index cb19cc1865ca..7adfd80fbf55 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/renesas.h
@@ -11,8 +11,8 @@
* (at your option) any later version.
*/
-#ifndef __LINUX_CLK_SHMOBILE_H_
-#define __LINUX_CLK_SHMOBILE_H_
+#ifndef __LINUX_CLK_RENESAS_H_
+#define __LINUX_CLK_RENESAS_H_
#include <linux/types.h>
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 9a638601cb09..dc5164a6df29 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -23,8 +23,8 @@
* @mult_div1_reg: register containing the DPLL M and N bitfields
* @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg
* @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg
- * @clk_bypass: struct clk pointer to the clock's bypass clock input
- * @clk_ref: struct clk pointer to the clock's reference clock input
+ * @clk_bypass: struct clk_hw pointer to the clock's bypass clock input
+ * @clk_ref: struct clk_hw pointer to the clock's reference clock input
* @control_reg: register containing the DPLL mode bitfield
* @enable_mask: mask of the DPLL mode bitfield in @control_reg
* @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
@@ -69,8 +69,8 @@ struct dpll_data {
void __iomem *mult_div1_reg;
u32 mult_mask;
u32 div1_mask;
- struct clk *clk_bypass;
- struct clk *clk_ref;
+ struct clk_hw *clk_bypass;
+ struct clk_hw *clk_ref;
void __iomem *control_reg;
u32 enable_mask;
unsigned long last_rounded_rate;
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 08bffcc466de..c2c04f7cbe8a 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -44,8 +44,7 @@ struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
void clkdev_add_table(struct clk_lookup *, size_t);
int clk_add_alias(const char *, const char *, const char *, struct device *);
-int clk_register_clkdev(struct clk *, const char *, const char *, ...)
- __printf(3, 4);
+int clk_register_clkdev(struct clk *, const char *, const char *);
int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
#ifdef CONFIG_COMMON_CLK
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index bdcf358dfce2..0d442e34c349 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -190,9 +190,9 @@ extern void clockevents_config_and_register(struct clock_event_device *dev,
extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
static inline void
-clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
+clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec)
{
- return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec);
+ return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec);
}
extern void clockevents_suspend(void);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 6013021a3b39..a307bf62974f 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -118,6 +118,23 @@ struct clocksource {
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
+{
+ /* freq = cyc/from
+ * mult/2^shift = ns/cyc
+ * mult = ns/cyc * 2^shift
+ * mult = from/freq * 2^shift
+ * mult = from * 2^shift / freq
+ * mult = (from<<shift) / freq
+ */
+ u64 tmp = ((u64)from) << shift_constant;
+
+ tmp += freq/2; /* round for do_div */
+ do_div(tmp, freq);
+
+ return (u32)tmp;
+}
+
/**
* clocksource_khz2mult - calculates mult from khz and shift
* @khz: Clocksource frequency in KHz
@@ -128,19 +145,7 @@ struct clocksource {
*/
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
- /* khz = cyc/(Million ns)
- * mult/2^shift = ns/cyc
- * mult = ns/cyc * 2^shift
- * mult = 1Million/khz * 2^shift
- * mult = 1000000 * 2^shift / khz
- * mult = (1000000<<shift) / khz
- */
- u64 tmp = ((u64)1000000) << shift_constant;
-
- tmp += khz/2; /* round for do_div */
- do_div(tmp, khz);
-
- return (u32)tmp;
+ return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC);
}
/**
@@ -154,19 +159,7 @@ static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
*/
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
- /* hz = cyc/(Billion ns)
- * mult/2^shift = ns/cyc
- * mult = ns/cyc * 2^shift
- * mult = 1Billion/hz * 2^shift
- * mult = 1000000000 * 2^shift / hz
- * mult = (1000000000<<shift) / hz
- */
- u64 tmp = ((u64)1000000000) << shift_constant;
-
- tmp += hz/2; /* round for do_div */
- do_div(tmp, hz);
-
- return (u32)tmp;
+ return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC);
}
/**
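For orientation, a worked example of what the new shared helper computes; the 1000 kHz figure and shift value are purely illustrative.

/* Worked example (illustrative numbers only): khz = 1000, shift = 20.
 * clocksource_khz2mult(1000, 20)
 *   -> clocksource_freq2mult(1000, 20, NSEC_PER_MSEC)
 *   -> ((1000000ULL << 20) + 500) / 1000 = 1048576000
 * Converting one cycle back: (1 * 1048576000) >> 20 = 1000 ns,
 * i.e. the period of a 1 MHz clock, as expected.
 */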
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 4cd4ddf64cc7..d7c8de583a23 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -52,6 +52,10 @@ extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
+extern int kcompactd_run(int nid);
+extern void kcompactd_stop(int nid);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
+
#else
static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, int alloc_flags,
@@ -84,6 +88,18 @@ static inline bool compaction_deferred(struct zone *zone, int order)
return true;
}
+static inline int kcompactd_run(int nid)
+{
+ return 0;
+}
+static inline void kcompactd_stop(int nid)
+{
+}
+
+static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+{
+}
+
#endif /* CONFIG_COMPACTION */
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
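A rough sketch of how the new kcompactd hooks are intended to be driven; the caller shown is an assumption, not part of this header, and the order/classzone values are made up.

/* Hypothetical caller: start one kcompactd per node with memory and
 * immediately nudge it for order-3 allocations (values illustrative).
 */
static void example_start_kcompactd(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		kcompactd_run(nid);
		wakeup_kcompactd(NODE_DATA(nid), 3, 0);
	}
}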
diff --git a/include/linux/compat.h b/include/linux/compat.h
index a76c9172b2eb..f964ef79e0ad 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -5,6 +5,8 @@
* syscall compatibility layer.
*/
+#include <linux/types.h>
+
#ifdef CONFIG_COMPAT
#include <linux/stat.h>
@@ -340,6 +342,12 @@ asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
const struct compat_iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
+asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
asmlinkage long compat_sys_preadv64(unsigned long fd,
@@ -713,9 +721,22 @@ asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
int, const char __user *);
+
+/*
+ * For most but not all architectures, "am I in a compat syscall?" and
+ * "am I a compat task?" are the same question. For architectures on which
+ * they aren't the same question, arch code can override in_compat_syscall.
+ */
+
+#ifndef in_compat_syscall
+static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#endif
+
#else
#define is_compat_task() (0)
+static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
+
#endif /* _LINUX_COMPAT_H */
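A minimal sketch of the intended use of in_compat_syscall() in an ioctl-style path; the structures and handler are hypothetical. Unlike is_compat_task(), this answers "is the current syscall a compat syscall", which matters on architectures where the two questions differ.

struct my_args64 { __u64 addr; __u32 len; __u32 pad; };
struct my_args32 { __u32 addr; __u32 len; };

static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct my_args64 args = { };

	if (in_compat_syscall()) {
		struct my_args32 a32;

		if (copy_from_user(&a32, (void __user *)arg, sizeof(a32)))
			return -EFAULT;
		args.addr = a32.addr;
		args.len = a32.len;
	} else if (copy_from_user(&args, (void __user *)arg, sizeof(args))) {
		return -EFAULT;
	}
	/* ... act on args ... */
	return 0;
}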
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index d1e49d52b640..de179993e039 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -10,3 +10,8 @@
#undef uninitialized_var
#define uninitialized_var(x) x = *(&(x))
#endif
+
+/* same as gcc, this was present in clang-2.6 so we can assume it works
+ * with any version that can compile the kernel
+ */
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 48f5aab117ae..b5ff9881bef8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -20,12 +20,14 @@
# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
-#else
+#else /* CONFIG_SPARSE_RCU_POINTER */
# define __rcu
-#endif
+#endif /* CONFIG_SPARSE_RCU_POINTER */
+# define __private __attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
-#else
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
# define __user
# define __kernel
# define __safe
@@ -44,7 +46,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __percpu
# define __rcu
# define __pmem
-#endif
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+#endif /* __CHECKER__ */
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
@@ -263,8 +267,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
- * compile-time warning.
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
+ * least two memcpy()s: one for the __builtin_memcpy() and one for the
+ * macro's copy into the variable '__u' allocated on the stack.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
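A small sketch of the new sparse annotation; the structure and field names are invented. Fields marked __private may only be touched through ACCESS_PRIVATE(), so stray direct dereferences show up as sparse address-space warnings.

struct my_counter {
	int __private count;			/* internal, not for users */
};

static inline void my_counter_inc(struct my_counter *c)
{
	ACCESS_PRIVATE(c, count)++;		/* legitimate internal access */
}

/* elsewhere, a plain "c->count++" would trigger a sparse warning */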
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index f8165c129ccb..485fe5519448 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -96,7 +96,8 @@ struct config_group {
struct config_item cg_item;
struct list_head cg_children;
struct configfs_subsystem *cg_subsys;
- struct config_group **default_groups;
+ struct list_head default_groups;
+ struct list_head group_entry;
};
extern void config_group_init(struct config_group *group);
@@ -123,6 +124,12 @@ extern struct config_item *config_group_find_item(struct config_group *,
const char *);
+static inline void configfs_add_default_group(struct config_group *new_group,
+ struct config_group *group)
+{
+ list_add_tail(&new_group->group_entry, &group->default_groups);
+}
+
struct configfs_attribute {
const char *ca_name;
struct module *ca_owner;
@@ -251,6 +258,8 @@ int configfs_register_group(struct config_group *parent_group,
struct config_group *group);
void configfs_unregister_group(struct config_group *group);
+void configfs_remove_default_groups(struct config_group *group);
+
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
const char *name,
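A rough before/after sketch of the default-groups conversion; the group names and item types are made up. Instead of populating a NULL-terminated default_groups array, children are now added with configfs_add_default_group() before the parent is registered.

static struct config_item_type my_parent_type;	/* assumed, defined for real elsewhere */
static struct config_item_type my_child_type;
static struct config_group my_parent_group;
static struct config_group my_child_group;

static void my_setup_defaults(void)
{
	config_group_init_type_name(&my_parent_group, "parent", &my_parent_type);
	config_group_init_type_name(&my_child_group, "child", &my_child_type);

	/* was: my_parent_group.default_groups[0] = &my_child_group; */
	configfs_add_default_group(&my_child_group, &my_parent_group);
}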
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
new file mode 100644
index 000000000000..7d410260661b
--- /dev/null
+++ b/include/linux/coresight-pmu.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_CORESIGHT_PMU_H
+#define _LINUX_CORESIGHT_PMU_H
+
+#define CORESIGHT_ETM_PMU_NAME "cs_etm"
+#define CORESIGHT_ETM_PMU_SEED 0x10
+
+/* ETMv3.5/PTM's ETMCR config bit */
+#define ETM_OPT_CYCACC 12
+#define ETM_OPT_TS 28
+
+static inline int coresight_get_trace_id(int cpu)
+{
+ /*
+ * A trace ID of value 0 is invalid, so let's start at some
+ * random value that fits in 7 bits and go from there. Since
+ * the common convention is to have data trace IDs be I(N) + 1,
+ * set instruction trace IDs as a function of the CPU number.
+ */
+ return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
+}
+
+#endif
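For reference, the trace-ID scheme above works out as follows (CPU numbers purely illustrative): CPU 0 gets instruction trace ID 0x10 and, by the I(N) + 1 convention, data trace ID 0x11; CPU 1 gets 0x12 and 0x13, and so on, which keeps the IDs inside the 7-bit space for reasonable CPU counts.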
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index a7cabfa23b55..385d62e64abb 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -14,6 +14,7 @@
#define _LINUX_CORESIGHT_H
#include <linux/device.h>
+#include <linux/perf_event.h>
#include <linux/sched.h>
/* Peripheral id registers (0xFD0-0xFEC) */
@@ -152,7 +153,6 @@ struct coresight_connection {
by @coresight_ops.
* @dev: The device entity associated to this component.
* @refcnt: keep track of what is in use.
- * @path_link: link of current component into the path being enabled.
* @orphan: true if the component has connections that haven't been linked.
* @enable: 'true' if component is currently part of an active path.
* @activated: 'true' only if a _sink_ has been activated. A sink can be
@@ -168,7 +168,6 @@ struct coresight_device {
const struct coresight_ops *ops;
struct device dev;
atomic_t *refcnt;
- struct list_head path_link;
bool orphan;
bool enable; /* true only if configured as part of a path */
bool activated; /* true only if a sink is part of a path */
@@ -183,12 +182,29 @@ struct coresight_device {
/**
* struct coresight_ops_sink - basic operations for a sink
* Operations available for sinks
- * @enable: enables the sink.
- * @disable: disables the sink.
+ * @enable: enables the sink.
+ * @disable: disables the sink.
+ * @alloc_buffer: initialises perf's ring buffer for trace collection.
+ * @free_buffer: release memory allocated by @alloc_buffer.
+ * @set_buffer: initialises the buffer mechanics before a trace session.
+ * @reset_buffer: finalises the buffer mechanics after a trace session.
+ * @update_buffer: update buffer pointers after a trace session.
*/
struct coresight_ops_sink {
- int (*enable)(struct coresight_device *csdev);
+ int (*enable)(struct coresight_device *csdev, u32 mode);
void (*disable)(struct coresight_device *csdev);
+ void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
+ void **pages, int nr_pages, bool overwrite);
+ void (*free_buffer)(void *config);
+ int (*set_buffer)(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config);
+ unsigned long (*reset_buffer)(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config, bool *lost);
+ void (*update_buffer)(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config);
};
/**
@@ -205,14 +221,18 @@ struct coresight_ops_link {
/**
* struct coresight_ops_source - basic operations for a source
* Operations available for sources.
+ * @cpu_id: returns the value of the CPU number this component
+ * is associated to.
* @trace_id: returns the value of the component's trace ID as known
- to the HW.
+ * to the HW.
* @enable: enables tracing for a source.
* @disable: disables tracing for a source.
*/
struct coresight_ops_source {
+ int (*cpu_id)(struct coresight_device *csdev);
int (*trace_id)(struct coresight_device *csdev);
- int (*enable)(struct coresight_device *csdev);
+ int (*enable)(struct coresight_device *csdev,
+ struct perf_event_attr *attr, u32 mode);
void (*disable)(struct coresight_device *csdev);
};
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d2ca8c38f9c4..f9b1fab4388a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -16,6 +16,7 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
+#include <linux/cpuhotplug.h>
struct device;
struct device_node;
@@ -27,6 +28,9 @@ struct cpu {
struct device dev;
};
+extern void boot_cpu_init(void);
+extern void boot_cpu_state_init(void);
+
extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
@@ -74,7 +78,7 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
- CPU_PRI_SMPBOOT = 9,
+
/* bring up workqueues before normal notifiers and down after */
CPU_PRI_WORKQUEUE_UP = 5,
CPU_PRI_WORKQUEUE_DOWN = -5,
@@ -97,9 +101,7 @@ enum {
* Called on the new cpu, just before
* enabling interrupts. Must not sleep,
* must not fail */
-#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached
- * idle loop. */
-#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly,
+#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly,
* perhaps due to preemption. */
/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
@@ -118,6 +120,7 @@ enum {
#ifdef CONFIG_SMP
+extern bool cpuhp_tasks_frozen;
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
#define cpu_notifier(fn, pri) { \
@@ -167,7 +170,6 @@ static inline void __unregister_cpu_notifier(struct notifier_block *nb)
}
#endif
-void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
@@ -177,6 +179,7 @@ extern void cpu_maps_update_done(void);
#define cpu_notifier_register_done cpu_maps_update_done
#else /* CONFIG_SMP */
+#define cpuhp_tasks_frozen 0
#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
@@ -215,10 +218,6 @@ static inline void cpu_notifier_register_done(void)
{
}
-static inline void smpboot_thread_init(void)
-{
-}
-
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -265,11 +264,6 @@ static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */
-enum cpuhp_state {
- CPUHP_OFFLINE,
- CPUHP_ONLINE,
-};
-
void cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
@@ -280,14 +274,15 @@ void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
-DECLARE_PER_CPU(bool, cpu_dead_idle);
-
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
+void cpuhp_report_idle_dead(void);
+#else
+static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 88a4215125bc..718e8725de8a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -80,7 +80,6 @@ struct cpufreq_policy {
unsigned int last_policy; /* policy before unplug */
struct cpufreq_governor *governor; /* see below */
void *governor_data;
- bool governor_enabled; /* governor start/stop flag */
char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
struct work_struct update; /* if update_policy() needs to be
@@ -100,10 +99,6 @@ struct cpufreq_policy {
* - Any routine that will write to the policy structure and/or may take away
* the policy altogether (eg. CPU hotplug), will hold this lock in write
* mode before doing so.
- *
- * Additional rules:
- * - Lock should not be held across
- * __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
*/
struct rw_semaphore rwsem;
@@ -464,29 +459,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
-/* CPUFREQ DEFAULT GOVERNOR */
-/*
- * Performance governor is fallback governor if any other gov failed to auto
- * load due latency restrictions
- */
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
-extern struct cpufreq_governor cpufreq_gov_performance;
-#endif
-#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
-#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_performance)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE)
-extern struct cpufreq_governor cpufreq_gov_powersave;
-#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
-extern struct cpufreq_governor cpufreq_gov_userspace;
-#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
-extern struct cpufreq_governor cpufreq_gov_ondemand;
-#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
-extern struct cpufreq_governor cpufreq_gov_conservative;
-#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
-#endif
+struct cpufreq_governor *cpufreq_default_governor(void);
+struct cpufreq_governor *cpufreq_fallback_governor(void);
/*********************************************************************
* FREQUENCY TABLE HELPERS *
@@ -525,16 +499,6 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
}
#endif
-static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos)
-{
- while ((*pos)->frequency != CPUFREQ_TABLE_END)
- if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID)
- return true;
- else
- (*pos)++;
- return false;
-}
-
/*
* cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -551,8 +515,11 @@ static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos)
* @table: the cpufreq_frequency_table * to iterate over.
*/
-#define cpufreq_for_each_valid_entry(pos, table) \
- for (pos = table; cpufreq_next_valid(&pos); pos++)
+#define cpufreq_for_each_valid_entry(pos, table) \
+ for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \
+ if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
+ continue; \
+ else
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
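A minimal sketch of the iteration macro in use; the table is assumed to come from the driver. Invalid entries are now skipped by the macro itself, which is what allowed cpufreq_next_valid() to be dropped.

static void my_dump_table(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;

	cpufreq_for_each_valid_entry(pos, table)
		pr_info("valid frequency: %u kHz\n", pos->frequency);
}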
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
new file mode 100644
index 000000000000..5d68e15e46b7
--- /dev/null
+++ b/include/linux/cpuhotplug.h
@@ -0,0 +1,93 @@
+#ifndef __CPUHOTPLUG_H
+#define __CPUHOTPLUG_H
+
+enum cpuhp_state {
+ CPUHP_OFFLINE,
+ CPUHP_CREATE_THREADS,
+ CPUHP_NOTIFY_PREPARE,
+ CPUHP_BRINGUP_CPU,
+ CPUHP_AP_IDLE_DEAD,
+ CPUHP_AP_OFFLINE,
+ CPUHP_AP_NOTIFY_STARTING,
+ CPUHP_AP_ONLINE,
+ CPUHP_TEARDOWN_CPU,
+ CPUHP_AP_ONLINE_IDLE,
+ CPUHP_AP_SMPBOOT_THREADS,
+ CPUHP_AP_NOTIFY_ONLINE,
+ CPUHP_AP_ONLINE_DYN,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ CPUHP_ONLINE,
+};
+
+int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
+ int (*startup)(unsigned int cpu),
+ int (*teardown)(unsigned int cpu));
+
+/**
+ * cpuhp_setup_state - Set up hotplug state callbacks and invoke the startup callback
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback (will be used in debug output)
+ * @startup: startup callback function
+ * @teardown: teardown callback function
+ *
+ * Installs the callback functions and invokes the startup callback on
+ * the present cpus which have already reached the @state.
+ */
+static inline int cpuhp_setup_state(enum cpuhp_state state,
+ const char *name,
+ int (*startup)(unsigned int cpu),
+ int (*teardown)(unsigned int cpu))
+{
+ return __cpuhp_setup_state(state, name, true, startup, teardown);
+}
+
+/**
+ * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
+ * callbacks
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback.
+ * @startup: startup callback function
+ * @teardown: teardown callback function
+ *
+ * Same as @cpuhp_setup_state except that no calls are executed
+ * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
+ */
+static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
+ const char *name,
+ int (*startup)(unsigned int cpu),
+ int (*teardown)(unsigned int cpu))
+{
+ return __cpuhp_setup_state(state, name, false, startup, teardown);
+}
+
+void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
+
+/**
+ * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown
+ * @state: The state for which the calls are removed
+ *
+ * Removes the callback functions and invokes the teardown callback on
+ * the present cpus which have already reached the @state.
+ */
+static inline void cpuhp_remove_state(enum cpuhp_state state)
+{
+ __cpuhp_remove_state(state, true);
+}
+
+/**
+ * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
+ * teardown
+ * @state: The state for which the calls are removed
+ */
+static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
+{
+ __cpuhp_remove_state(state, false);
+}
+
+#ifdef CONFIG_SMP
+void cpuhp_online_idle(enum cpuhp_state state);
+#else
+static inline void cpuhp_online_idle(enum cpuhp_state state) { }
+#endif
+
+#endif
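A minimal sketch of registering callbacks against the new hotplug state machine; the state name string and both callbacks are hypothetical, and error handling is reduced to the essentials.

static int my_cpu_online(unsigned int cpu)
{
	/* per-CPU bring-up work would go here */
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	/* per-CPU tear-down work would go here */
	return 0;
}

static int my_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				my_cpu_online, my_cpu_offline);
	return ret < 0 ? ret : 0;
}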
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index fc14275ff34e..40cee6b77a93 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -607,8 +607,6 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
/**
* cpumask_size - size to allocate for a 'struct cpumask' in bytes
- *
- * This will eventually be a runtime variable, depending on nr_cpu_ids.
*/
static inline size_t cpumask_size(void)
{
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 8d70e1361ecd..257db64562e5 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -377,7 +377,10 @@ extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
#define current_user_ns() (current_cred_xxx(user_ns))
#else
-#define current_user_ns() (&init_user_ns)
+static inline struct user_namespace *current_user_ns(void)
+{
+ return &init_user_ns;
+}
#endif
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index e71cb70a1ac2..99c94899ad0f 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -54,7 +54,6 @@
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
-#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@@ -137,7 +136,6 @@ struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_blkcipher;
-struct crypto_hash;
struct crypto_tfm;
struct crypto_type;
struct skcipher_givcrypt_request;
@@ -187,11 +185,6 @@ struct cipher_desc {
void *info;
};
-struct hash_desc {
- struct crypto_hash *tfm;
- u32 flags;
-};
-
/**
* DOC: Block Cipher Algorithm Definitions
*
@@ -519,18 +512,6 @@ struct cipher_tfm {
void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
-struct hash_tfm {
- int (*init)(struct hash_desc *desc);
- int (*update)(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nsg);
- int (*final)(struct hash_desc *desc, u8 *out);
- int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
- unsigned int nsg, u8 *out);
- int (*setkey)(struct crypto_hash *tfm, const u8 *key,
- unsigned int keylen);
- unsigned int digestsize;
-};
-
struct compress_tfm {
int (*cot_compress)(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
@@ -543,7 +524,6 @@ struct compress_tfm {
#define crt_ablkcipher crt_u.ablkcipher
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
-#define crt_hash crt_u.hash
#define crt_compress crt_u.compress
struct crypto_tfm {
@@ -554,7 +534,6 @@ struct crypto_tfm {
struct ablkcipher_tfm ablkcipher;
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
- struct hash_tfm hash;
struct compress_tfm compress;
} crt_u;
@@ -581,10 +560,6 @@ struct crypto_comp {
struct crypto_tfm base;
};
-struct crypto_hash {
- struct crypto_tfm base;
-};
-
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -1577,233 +1552,6 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
dst, src);
}
-/**
- * DOC: Synchronous Message Digest API
- *
- * The synchronous message digest API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
- */
-
-static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_hash *)tfm;
-}
-
-static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
-{
- BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
- CRYPTO_ALG_TYPE_HASH_MASK);
- return __crypto_hash_cast(tfm);
-}
-
-/**
- * crypto_alloc_hash() - allocate synchronous message digest handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * message digest cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a message digest. The returned struct
- * crypto_hash is the cipher handle that is required for any subsequent
- * API invocation for that message digest.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- mask &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_HASH;
- mask |= CRYPTO_ALG_TYPE_HASH_MASK;
-
- return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_hash() - zeroize and free message digest handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_hash(struct crypto_hash *tfm)
-{
- crypto_free_tfm(crypto_hash_tfm(tfm));
-}
-
-/**
- * crypto_has_hash() - Search for the availability of a message digest
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * message digest cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the message digest cipher is known to the kernel crypto
- * API; false otherwise
- */
-static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- mask &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_HASH;
- mask |= CRYPTO_ALG_TYPE_HASH_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
-{
- return &crypto_hash_tfm(tfm)->crt_hash;
-}
-
-/**
- * crypto_hash_blocksize() - obtain block size for message digest
- * @tfm: cipher handle
- *
- * The block size for the message digest cipher referenced with the cipher
- * handle is returned.
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
-}
-
-static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
-}
-
-/**
- * crypto_hash_digestsize() - obtain message digest size
- * @tfm: cipher handle
- *
- * The size for the message digest created by the message digest cipher
- * referenced with the cipher handle is returned.
- *
- * Return: message digest size
- */
-static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
-{
- return crypto_hash_crt(tfm)->digestsize;
-}
-
-static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
-{
- return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
-}
-
-static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
-{
- crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
-}
-
-static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
-{
- crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
-}
-
-/**
- * crypto_hash_init() - (re)initialize message digest handle
- * @desc: cipher request handle that to be filled by caller --
- * desc.tfm is filled with the hash cipher handle;
- * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * The call (re-)initializes the message digest referenced by the hash cipher
- * request handle. Any potentially existing state created by previous
- * operations is discarded.
- *
- * Return: 0 if the message digest initialization was successful; < 0 if an
- * error occurred
- */
-static inline int crypto_hash_init(struct hash_desc *desc)
-{
- return crypto_hash_crt(desc->tfm)->init(desc);
-}
-
-/**
- * crypto_hash_update() - add data to message digest for processing
- * @desc: cipher request handle
- * @sg: scatter / gather list pointing to the data to be added to the message
- * digest
- * @nbytes: number of bytes to be processed from @sg
- *
- * Updates the message digest state of the cipher handle pointed to by the
- * hash cipher request handle with the input data pointed to by the
- * scatter/gather list.
- *
- * Return: 0 if the message digest update was successful; < 0 if an error
- * occurred
- */
-static inline int crypto_hash_update(struct hash_desc *desc,
- struct scatterlist *sg,
- unsigned int nbytes)
-{
- return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
-}
-
-/**
- * crypto_hash_final() - calculate message digest
- * @desc: cipher request handle
- * @out: message digest output buffer -- The caller must ensure that the out
- * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
- * function).
- *
- * Finalize the message digest operation and create the message digest
- * based on all data added to the cipher handle. The message digest is placed
- * into the output buffer.
- *
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
- */
-static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
-{
- return crypto_hash_crt(desc->tfm)->final(desc, out);
-}
-
-/**
- * crypto_hash_digest() - calculate message digest for a buffer
- * @desc: see crypto_hash_final()
- * @sg: see crypto_hash_update()
- * @nbytes: see crypto_hash_update()
- * @out: see crypto_hash_final()
- *
- * This function is a "short-hand" for the function calls of crypto_hash_init,
- * crypto_hash_update and crypto_hash_final. The parameters have the same
- * meaning as discussed for those separate three functions.
- *
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
- */
-static inline int crypto_hash_digest(struct hash_desc *desc,
- struct scatterlist *sg,
- unsigned int nbytes, u8 *out)
-{
- return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
-}
-
-/**
- * crypto_hash_setkey() - set key for message digest
- * @hash: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the message digest cipher. The cipher
- * handle must point to a keyed hash in order for this function to succeed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_hash_setkey(struct crypto_hash *hash,
- const u8 *key, unsigned int keylen)
-{
- return crypto_hash_crt(hash)->setkey(hash, key, keylen);
-}
-
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
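With the legacy crypto_hash interface removed, existing users are expected to move to the ahash/shash APIs from <crypto/hash.h>. A rough sketch of the synchronous replacement; note that shash operates on linear buffers rather than scatterlists, and the algorithm name and error handling below are illustrative only.

#include <crypto/hash.h>

static int my_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* no CRYPTO_TFM_REQ_MAY_SLEEP */
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}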
diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h
index 542888504994..05b97144d342 100644
--- a/include/linux/davinci_emac.h
+++ b/include/linux/davinci_emac.h
@@ -12,7 +12,7 @@
#define _LINUX_DAVINCI_EMAC_H
#include <linux/if_ether.h>
-#include <linux/memory.h>
+#include <linux/nvmem-consumer.h>
struct mdio_platform_data {
unsigned long bus_freq;
@@ -46,5 +46,5 @@ enum {
EMAC_VERSION_2, /* DM646x */
};
-void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context);
+void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context);
#endif
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 818e45078929..636dd59ab505 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,7 +7,7 @@
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
get_block_t, dio_iodone_t, int flags);
-int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
@@ -52,6 +52,8 @@ static inline bool dax_mapping(struct address_space *mapping)
{
return mapping->host && IS_DAX(mapping->host);
}
-int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
- loff_t end);
+
+struct writeback_control;
+int dax_writeback_mapping_range(struct address_space *mapping,
+ struct block_device *bdev, struct writeback_control *wbc);
#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 7781ce110503..7cb043d8f4e8 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -228,6 +228,8 @@ struct dentry_operations {
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
+#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
+
extern seqlock_t rename_lock;
/*
@@ -246,6 +248,7 @@ extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
@@ -272,38 +275,8 @@ extern int have_submounts(struct dentry *);
* This adds the entry to the hash queues.
*/
extern void d_rehash(struct dentry *);
-
-/**
- * d_add - add dentry to hash queues
- * @entry: dentry to add
- * @inode: The inode to attach to this dentry
- *
- * This adds the entry to the hash queues and initializes @inode.
- * The entry was actually filled in earlier during d_alloc().
- */
-static inline void d_add(struct dentry *entry, struct inode *inode)
-{
- d_instantiate(entry, inode);
- d_rehash(entry);
-}
-
-/**
- * d_add_unique - add dentry to hash queues without aliasing
- * @entry: dentry to add
- * @inode: The inode to attach to this dentry
- *
- * This adds the entry to the hash queues and initializes @inode.
- * The entry was actually filled in earlier during d_alloc().
- */
-static inline struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
-{
- struct dentry *res;
-
- res = d_instantiate_unique(entry, inode);
- d_rehash(res != NULL ? res : entry);
- return res;
-}
+extern void d_add(struct dentry *, struct inode *);
extern void dentry_update_name_case(struct dentry *, struct qstr *);
@@ -409,9 +382,7 @@ static inline bool d_mountpoint(const struct dentry *dentry)
*/
static inline unsigned __d_entry_type(const struct dentry *dentry)
{
- unsigned type = READ_ONCE(dentry->d_flags);
- smp_rmb();
- return type & DCACHE_ENTRY_TYPE;
+ return dentry->d_flags & DCACHE_ENTRY_TYPE;
}
static inline bool d_is_miss(const struct dentry *dentry)
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 19c066dce1da..981e53ab84e8 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -162,6 +162,14 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_automount(const char *name,
+ struct dentry *parent,
+ struct vfsmount *(*f)(void *),
+ void *data)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void debugfs_remove(struct dentry *dentry)
{ }
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ec1c61c87d89..0830c9e86f0d 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -124,6 +124,8 @@ struct dm_dev {
char name[16];
};
+dev_t dm_get_dev_t(const char *path);
+
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
@@ -190,6 +192,13 @@ struct target_type {
#define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE)
/*
+ * Indicates that a target may replace any target; even immutable targets.
+ * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
+ */
+#define DM_TARGET_WILDCARD 0x00000008
+#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)
+
+/*
* Some targets need to be sent the same WRITE bio several times so
* that they can send copies of it to different devices. This function
* examines any supplied bio and returns the number of copies of it the
@@ -231,10 +240,10 @@ struct dm_target {
unsigned num_write_same_bios;
/*
- * The minimum number of extra bytes allocated in each bio for the
- * target to use. dm_per_bio_data returns the data location.
+ * The minimum number of extra bytes allocated in each io for the
+ * target to use.
*/
- unsigned per_bio_data_size;
+ unsigned per_io_data_size;
/*
* If defined, this function is called to find out how many
diff --git a/include/linux/device.h b/include/linux/device.h
index 6d6f1fec092f..002c59728dbe 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -70,8 +70,11 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @dev_groups: Default attributes of the devices on the bus.
* @drv_groups: Default attributes of the device drivers on the bus.
* @match: Called, perhaps multiple times, whenever a new device or driver
- * is added for this bus. It should return a nonzero value if the
- * given device can be handled by the given driver.
+ * is added for this bus. It should return a positive value if the
+ * given device can be handled by the given driver and zero
+ * otherwise. It may also return an error code if determining whether
+ * the driver supports the device is not possible. In the case of
+ * -EPROBE_DEFER, the device is queued for deferred probing.
* @uevent: Called when a device is added, removed, or a few other things
* that generate uevents to add the environment variables.
* @probe: Called when a new device or driver add to this bus, and callback
@@ -682,6 +685,18 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
+static inline int devm_add_action_or_reset(struct device *dev,
+ void (*action)(void *), void *data)
+{
+ int ret;
+
+ ret = devm_add_action(dev, action, data);
+ if (ret)
+ action(data);
+
+ return ret;
+}
+
struct device_dma_parameters {
/*
* a low level driver may set these to teach IOMMU code about
@@ -958,6 +973,11 @@ static inline void device_lock(struct device *dev)
mutex_lock(&dev->mutex);
}
+static inline int device_lock_interruptible(struct device *dev)
+{
+ return mutex_lock_interruptible(&dev->mutex);
+}
+
static inline int device_trylock(struct device *dev)
{
return mutex_trylock(&dev->mutex);
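A minimal sketch of the new devm helper; the driver state and release function are invented. devm_add_action_or_reset() removes the common boilerplate where a failure to register the action must undo the work by hand, because the action runs immediately on failure.

struct my_hw { int enabled; };			/* hypothetical driver state */

static void my_disable_hw(void *data)
{
	struct my_hw *hw = data;

	hw->enabled = 0;			/* undo whatever probe enabled */
}

static int my_probe_step(struct device *dev, struct my_hw *hw)
{
	hw->enabled = 1;			/* ... enable the hardware ... */

	/* on registration failure the action runs right away */
	return devm_add_action_or_reset(dev, my_disable_hw, hw);
}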
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index 99c0be00b47c..5246239a4953 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -18,6 +18,7 @@ enum dma_attr {
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_FORCE_CONTIGUOUS,
+ DMA_ATTR_ALLOC_SINGLE_PAGES,
DMA_ATTR_MAX,
};
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index f98bd7068d55..3fe90d494edb 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -54,7 +54,7 @@ struct dma_buf_attachment;
* @release: release this buffer; to be called after the last dma_buf_put.
* @begin_cpu_access: [optional] called before cpu access to invalidate cpu
* caches and allocate backing storage (if not yet done)
- * respectively pin the objet into memory.
+ * respectively pin the object into memory.
* @end_cpu_access: [optional] called after cpu access to flush caches.
* @kmap_atomic: maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
@@ -93,10 +93,8 @@ struct dma_buf_ops {
/* after final dma_buf_put() */
void (*release)(struct dma_buf *);
- int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
- enum dma_data_direction);
- void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
- enum dma_data_direction);
+ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
void *(*kmap_atomic)(struct dma_buf *, unsigned long);
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,10 +222,10 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
-int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
- enum dma_data_direction dir);
+int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
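A short sketch of the revised CPU-access bracketing; the buffer, page offset and direction are assumed to come from the caller. The start/len arguments are gone, the whole buffer is covered, and end_cpu_access can now fail.

static int my_read_dmabuf_page(struct dma_buf *buf, unsigned long pgoff)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(buf, pgoff);
	/* ... read the page at vaddr ... */
	dma_buf_kunmap(buf, pgoff, vaddr);

	return dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
}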
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 75857cda38e9..9ea9aba28049 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -70,6 +70,8 @@ struct dma_map_ops {
int is_phys;
};
+extern struct dma_map_ops dma_noop_ops;
+
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#define DMA_MASK_NONE 0x0ULL
@@ -386,7 +388,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
- if (!ops->free)
+ if (!ops->free || !cpu_addr)
return;
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
@@ -641,31 +643,40 @@ static inline void dmam_release_declared_memory(struct device *dev)
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t gfp)
+static inline void *dma_alloc_wc(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t gfp)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}
+#ifndef dma_alloc_writecombine
+#define dma_alloc_writecombine dma_alloc_wc
+#endif
-static inline void dma_free_writecombine(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr)
+static inline void dma_free_wc(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}
+#ifndef dma_free_writecombine
+#define dma_free_writecombine dma_free_wc
+#endif
-static inline int dma_mmap_writecombine(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
+static inline int dma_mmap_wc(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
+#ifndef dma_mmap_writecombine
+#define dma_mmap_writecombine dma_mmap_wc
+#endif
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
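A small sketch of the shorter write-combine helpers; the device, size and error handling are illustrative, and the old dma_alloc_writecombine() spellings remain available through the fallback defines above.

static int my_alloc_framebuffer(struct device *dev, size_t size)
{
	dma_addr_t dma;
	void *cpu;

	cpu = dma_alloc_wc(dev, size, &dma, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* ... hand (cpu, dma) to the hardware ... */

	dma_free_wc(dev, size, cpu, dma);
	return 0;
}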
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 16a1cad30c33..017433712833 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -357,8 +357,8 @@ enum dma_slave_buswidth {
*/
struct dma_slave_config {
enum dma_transfer_direction direction;
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
enum dma_slave_buswidth src_addr_width;
enum dma_slave_buswidth dst_addr_width;
u32 src_maxburst;
@@ -401,6 +401,7 @@ enum dma_residue_granularity {
* since the enum dma_transfer_direction is not defined as bits for each
* type of direction, the dma controller should fill (1 << <TYPE>) and same
* should be checked by controller as well
+ * @max_burst: max burst capability per-transfer
* @cmd_pause: true, if pause and thereby resume is supported
* @cmd_terminate: true, if terminate cmd is supported
* @residue_granularity: granularity of the reported transfer residue
@@ -411,6 +412,7 @@ struct dma_slave_caps {
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 max_burst;
bool cmd_pause;
bool cmd_terminate;
enum dma_residue_granularity residue_granularity;
@@ -654,6 +656,7 @@ struct dma_filter {
* the enum dma_transfer_direction is not defined as bits for
* each type of direction, the dma controller should fill (1 <<
* <TYPE>) and same should be checked by controller as well
+ * @max_burst: max burst capability per-transfer
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
* @device_alloc_chan_resources: allocate resources and return the
@@ -712,6 +715,7 @@ struct dma_device {
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 max_burst;
bool descriptor_reuse;
enum dma_residue_granularity residue_granularity;
diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h
index ff8b55359648..0de21e935976 100644
--- a/include/linux/dqblk_qtree.h
+++ b/include/linux/dqblk_qtree.h
@@ -15,6 +15,7 @@
#define QTREE_DEL_REWRITE 6
struct dquot;
+struct kqid;
/* Operations */
struct qtree_fmt_operations {
@@ -52,5 +53,6 @@ static inline int qtree_depth(struct qtree_mem_dqinfo *info)
entries *= epb;
return i;
}
+int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid);
#endif /* _LINUX_DQBLK_QTREE_H */
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index 06791811e49d..885f587a3555 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -3,16 +3,25 @@
* platform description for 93xx46 EEPROMs.
*/
+struct gpio_desc;
+
struct eeprom_93xx46_platform_data {
unsigned char flags;
#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
#define EE_READONLY 0x08 /* forbid writing */
+ unsigned int quirks;
+/* Single word read transfers only; no sequential read. */
+#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
+/* Instructions such as EWEN are (addrlen + 2) in length. */
+#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
+
/*
* optional hooks to control additional logic
* before and after spi transfer.
*/
void (*prepare)(void *);
void (*finish)(void *);
+ struct gpio_desc *select;
};
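An illustrative platform-data instance using the new fields; the flag combination and board wiring are assumptions. Quirky chips set the relevant EEPROM_93XX46_QUIRK_* bits and can provide a chip-select GPIO descriptor at probe time.

static struct eeprom_93xx46_platform_data my_eeprom_pdata = {
	.flags	= EE_ADDR8 | EE_READONLY,
	.quirks	= EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
		  EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
	/* .select is filled in at probe time from a gpiod lookup */
};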
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 47be3ad7d3e5..1626474567ac 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -97,6 +97,7 @@ typedef struct {
#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
+#define EFI_MEMORY_NV ((u64)0x0000000000008000ULL) /* non-volatile */
#define EFI_MEMORY_MORE_RELIABLE \
((u64)0x0000000000010000ULL) /* higher reliability */
#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
@@ -299,7 +300,7 @@ typedef struct {
void *open_protocol_information;
void *protocols_per_handle;
void *locate_handle_buffer;
- void *locate_protocol;
+ efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
void *install_multiple_protocol_interfaces;
void *uninstall_multiple_protocol_interfaces;
void *calculate_crc32;
@@ -507,10 +508,6 @@ typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char
typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data);
-typedef efi_status_t
-efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor,
- u32 attr, unsigned long data_size, void *data);
-
typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
typedef void efi_reset_system_t (int reset_type, efi_status_t status,
unsigned long data_size, efi_char16_t *data);
@@ -529,7 +526,9 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
unsigned long count,
u64 *max_size,
int *reset_type);
-typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
+typedef efi_status_t efi_query_variable_store_t(u32 attributes,
+ unsigned long size,
+ bool nonblocking);
void efi_native_runtime_setup(void);
@@ -537,67 +536,92 @@ void efi_native_runtime_setup(void);
* EFI Configuration Table and GUID definitions
*/
#define NULL_GUID \
- EFI_GUID( 0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 )
+ EFI_GUID(0x00000000, 0x0000, 0x0000, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
#define MPS_TABLE_GUID \
- EFI_GUID( 0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+ EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, \
+ 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_TABLE_GUID \
- EFI_GUID( 0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+ EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, \
+ 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_20_TABLE_GUID \
- EFI_GUID( 0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81 )
+ EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, \
+ 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SMBIOS_TABLE_GUID \
- EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+ EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, \
+ 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define SMBIOS3_TABLE_GUID \
- EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 )
+ EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, \
+ 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
#define SAL_SYSTEM_TABLE_GUID \
- EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+ EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, \
+ 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define HCDP_TABLE_GUID \
- EFI_GUID( 0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98 )
+ EFI_GUID(0xf951938d, 0x620b, 0x42ef, \
+ 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
#define UGA_IO_PROTOCOL_GUID \
- EFI_GUID( 0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0xb, 0x7, 0xa2 )
+ EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, \
+ 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
#define EFI_GLOBAL_VARIABLE_GUID \
- EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
+ EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \
+ 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
#define UV_SYSTEM_TABLE_GUID \
- EFI_GUID( 0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 )
+ EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, \
+ 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
#define LINUX_EFI_CRASH_GUID \
- EFI_GUID( 0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0 )
+ EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, \
+ 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
#define LOADED_IMAGE_PROTOCOL_GUID \
- EFI_GUID( 0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+ EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, \
+ 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \
- EFI_GUID( 0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a )
+ EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, \
+ 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
#define EFI_UGA_PROTOCOL_GUID \
- EFI_GUID( 0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39 )
+ EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, \
+ 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
#define EFI_PCI_IO_PROTOCOL_GUID \
- EFI_GUID( 0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x2, 0x9a )
+ EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, \
+ 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
#define EFI_FILE_INFO_ID \
- EFI_GUID( 0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+ EFI_GUID(0x9576e92, 0x6d3f, 0x11d2, \
+ 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
- EFI_GUID( 0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80 )
+ EFI_GUID(0xb122a263, 0x3661, 0x4f68, \
+ 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
#define EFI_FILE_SYSTEM_GUID \
- EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+ EFI_GUID(0x964e5b22, 0x6459, 0x11d2, \
+ 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define DEVICE_TREE_GUID \
- EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 )
+ EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, \
+ 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
#define EFI_PROPERTIES_TABLE_GUID \
- EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 )
+ EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, \
+ 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
+
+#define EFI_RNG_PROTOCOL_GUID \
+ EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
+ 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
typedef struct {
efi_guid_t guid;
@@ -851,8 +875,9 @@ extern struct efi {
efi_get_variable_t *get_variable;
efi_get_next_variable_t *get_next_variable;
efi_set_variable_t *set_variable;
- efi_set_variable_nonblocking_t *set_variable_nonblocking;
+ efi_set_variable_t *set_variable_nonblocking;
efi_query_variable_info_t *query_variable_info;
+ efi_query_variable_info_t *query_variable_info_nonblocking;
efi_update_capsule_t *update_capsule;
efi_query_capsule_caps_t *query_capsule_caps;
efi_get_next_high_mono_count_t *get_next_high_mono_count;
@@ -884,13 +909,17 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
#ifdef CONFIG_X86
extern void efi_late_init(void);
extern void efi_free_boot_services(void);
-extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
+extern efi_status_t efi_query_variable_store(u32 attributes,
+ unsigned long size,
+ bool nonblocking);
extern void efi_find_mirror(void);
#else
static inline void efi_late_init(void) {}
static inline void efi_free_boot_services(void) {}
-static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+static inline efi_status_t efi_query_variable_store(u32 attributes,
+ unsigned long size,
+ bool nonblocking)
{
return EFI_SUCCESS;
}
@@ -1091,7 +1120,7 @@ struct efivar_operations {
efi_get_variable_t *get_variable;
efi_get_next_variable_t *get_next_variable;
efi_set_variable_t *set_variable;
- efi_set_variable_nonblocking_t *set_variable_nonblocking;
+ efi_set_variable_t *set_variable_nonblocking;
efi_query_variable_store_t *query_variable_store;
};
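
A caller of the reworked efi_query_variable_store() now passes a third
argument to select the non-blocking check path. A minimal sketch, assuming
the usual runtime-variable attributes and that 'size' is the payload about
to be written (not taken from this patch):

	efi_status_t status;

	status = efi_query_variable_store(EFI_VARIABLE_NON_VOLATILE |
					  EFI_VARIABLE_BOOTSERVICE_ACCESS |
					  EFI_VARIABLE_RUNTIME_ACCESS,
					  size, true /* nonblocking */);
	if (status != EFI_SUCCESS)
		return status;
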
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 653dc9c4ebac..e2b7bf27c03e 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -12,6 +12,7 @@
#ifndef _LINUX_ETHTOOL_H
#define _LINUX_ETHTOOL_H
+#include <linux/bitmap.h>
#include <linux/compat.h>
#include <uapi/linux/ethtool.h>
@@ -40,9 +41,6 @@ struct compat_ethtool_rxnfc {
#include <linux/rculist.h>
-extern int __ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd);
-
/**
* enum ethtool_phys_id_state - indicator state for physical identification
* @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated
@@ -97,13 +95,70 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
return index % n_rx_rings;
}
+/* number of link mode bits/ulongs handled internally by kernel */
+#define __ETHTOOL_LINK_MODE_MASK_NBITS \
+ (__ETHTOOL_LINK_MODE_LAST + 1)
+
+/* declare a link mode bitmap */
+#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
+ DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
+
+/* Drivers must ignore the base.cmd and base.link_mode_masks_nwords
+ * fields; they may overwrite them, but any change will be ignored by
+ * the core.
+ */
+struct ethtool_link_ksettings {
+ struct ethtool_link_settings base;
+ struct {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
+ } link_modes;
+};
+
+/**
+ * ethtool_link_ksettings_zero_link_mode - clear link_ksettings link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+ */
+#define ethtool_link_ksettings_zero_link_mode(ptr, name) \
+ bitmap_zero((ptr)->link_modes.name, __ETHTOOL_LINK_MODE_MASK_NBITS)
+
+/**
+ * ethtool_link_ksettings_add_link_mode - set bit in link_ksettings
+ * link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+ * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
+ * (not atomic, no bounds checking)
+ */
+#define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \
+ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+
+/**
+ * ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+ * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
+ * (not atomic, no bounds checking)
+ *
+ * Returns true/false.
+ */
+#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \
+ test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+
+extern int
+__ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *link_ksettings);
+
/**
* struct ethtool_ops - optional netdev operations
- * @get_settings: Get various device settings including Ethernet link
+ * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
+ * API. Get various device settings including Ethernet link
* settings. The @cmd parameter is expected to have been cleared
- * before get_settings is called. Returns a negative error code or
- * zero.
- * @set_settings: Set various device settings including Ethernet link
+ * before get_settings is called. Returns a negative error code
+ * or zero.
+ * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
+ * API. Set various device settings including Ethernet link
* settings. Returns a negative error code or zero.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
@@ -201,6 +256,29 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
* @get_module_eeprom: Get the eeprom information from the plug-in module
* @get_eee: Get Energy-Efficient (EEE) supported and status.
* @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
+ * It must check that the given queue number is valid. If neither a RX nor
+ * a TX queue has this number, return -EINVAL. If only a RX queue or a TX
+ * queue has this number, set the inapplicable fields to ~0 and return 0.
+ * Returns a negative error code or zero.
+ * @set_per_queue_coalesce: Set interrupt coalescing parameters per queue.
+ * It must check that the given queue number is valid. If neither a RX nor
+ * a TX queue has this number, return -EINVAL. If only a RX queue or a TX
+ * queue has this number, ignore the inapplicable fields.
+ * Returns a negative error code or zero.
+ * @get_link_ksettings: When defined, takes precedence over the
+ * %get_settings method. Get various device settings
+ * including Ethernet link settings. The %cmd and
+ * %link_mode_masks_nwords fields should be ignored (use
+ * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any
+ * change to them will be overwritten by kernel. Returns a
+ * negative error code or zero.
+ * @set_link_ksettings: When defined, takes precedence over the
+ * %set_settings method. Set various device settings including
+ * Ethernet link settings. The %cmd and %link_mode_masks_nwords
+ * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
+ * instead of the latter), any change to them will be overwritten
+ * by kernel. Returns a negative error code or zero.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -279,7 +357,13 @@ struct ethtool_ops {
const struct ethtool_tunable *, void *);
int (*set_tunable)(struct net_device *,
const struct ethtool_tunable *, const void *);
-
-
+ int (*get_per_queue_coalesce)(struct net_device *, u32,
+ struct ethtool_coalesce *);
+ int (*set_per_queue_coalesce)(struct net_device *, u32,
+ struct ethtool_coalesce *);
+ int (*get_link_ksettings)(struct net_device *,
+ struct ethtool_link_ksettings *);
+ int (*set_link_ksettings)(struct net_device *,
+ const struct ethtool_link_ksettings *);
};
#endif /* _LINUX_ETHTOOL_H */
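
The link-mode helpers and the get_link_ksettings hook added above are meant
to be used together from a driver. A minimal sketch; the driver name, the
speed and the advertised mode are illustrative, not taken from this patch:

	static int foo_get_link_ksettings(struct net_device *dev,
					  struct ethtool_link_ksettings *cmd)
	{
		/* clear the masks before adding individual link modes */
		ethtool_link_ksettings_zero_link_mode(cmd, supported);
		ethtool_link_ksettings_zero_link_mode(cmd, advertising);

		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

		cmd->base.speed = SPEED_1000;
		cmd->base.duplex = DUPLEX_FULL;
		return 0;
	}

The cmd and link_mode_masks_nwords members of base can be left alone, as
documented above: the core overwrites them anyway.
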
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index fa05e04c5531..d8414502edb4 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -97,6 +97,12 @@ enum fid_type {
FILEID_FAT_WITH_PARENT = 0x72,
/*
+ * 128 bit child FID (struct lu_fid)
+ * 128 bit parent FID (struct lu_fid)
+ */
+ FILEID_LUSTRE = 0x97,
+
+ /*
* Filesystems must not use 0xff file ID.
*/
FILEID_INVALID = 0xff,
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index e59c3be92106..9eb215a155e0 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -21,7 +21,7 @@
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
+#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
@@ -170,12 +170,12 @@ struct f2fs_extent {
#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
+#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
-#define ADDRS_PER_PAGE(page, fi) \
- (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK)
+#define ADDRS_PER_PAGE(page, inode) \
+ (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
@@ -345,7 +345,7 @@ struct f2fs_summary {
struct summary_footer {
unsigned char entry_type; /* SUM_TYPE_XXX */
- __u32 check_sum; /* summary checksum */
+ __le32 check_sum; /* summary checksum */
} __packed;
#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
@@ -358,6 +358,12 @@ struct summary_footer {
sizeof(struct sit_journal_entry))
#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
sizeof(struct sit_journal_entry))
+
+/* The reserved area should make the size of f2fs_extra_info equal to
+ * that of nat_journal and sit_journal.
+ */
+#define EXTRA_INFO_RESERVED (SUM_JOURNAL_SIZE - 2 - 8)
+
/*
* frequently updated NAT/SIT entries can be stored in the spare area in
* summary blocks
@@ -387,18 +393,28 @@ struct sit_journal {
__u8 reserved[SIT_JOURNAL_RESERVED];
} __packed;
-/* 4KB-sized summary block structure */
-struct f2fs_summary_block {
- struct f2fs_summary entries[ENTRIES_IN_SUM];
+struct f2fs_extra_info {
+ __le64 kbytes_written;
+ __u8 reserved[EXTRA_INFO_RESERVED];
+} __packed;
+
+struct f2fs_journal {
union {
__le16 n_nats;
__le16 n_sits;
};
- /* spare area is used by NAT or SIT journals */
+ /* spare area is used by NAT or SIT journals or extra info */
union {
struct nat_journal nat_j;
struct sit_journal sit_j;
+ struct f2fs_extra_info info;
};
+} __packed;
+
+/* 4KB-sized summary block structure */
+struct f2fs_summary_block {
+ struct f2fs_summary entries[ENTRIES_IN_SUM];
+ struct f2fs_journal journal;
struct summary_footer footer;
} __packed;
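
The F2FS_BLK_ALIGN() change above replaces the division with a shift; since
F2FS_BLKSIZE is 4096 = 1 << F2FS_BLKSIZE_BITS, the result is unchanged, e.g.
F2FS_BLK_ALIGN(5000) = (5000 + 4095) >> 12 = 2, exactly what
(5000 + 4095) / 4096 evaluated to before.
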
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 3159a7dba034..9f4956d8601c 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -62,10 +62,9 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
#endif /* CONFIG_FAULT_INJECTION */
#ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
+extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#else
-static inline bool should_failslab(size_t size, gfp_t gfpflags,
- unsigned long flags)
+static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
return false;
}
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 55433f86f0a3..dfe88351341f 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -296,9 +296,6 @@ struct fb_ops {
/* Draws cursor */
int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor);
- /* Rotates the display */
- void (*fb_rotate)(struct fb_info *info, int angle);
-
/* wait for blit idle, optional */
int (*fb_sync)(struct fb_info *info);
diff --git a/include/linux/fence.h b/include/linux/fence.h
index bb522011383b..2b17698b60b8 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -79,6 +79,8 @@ struct fence {
unsigned long flags;
ktime_t timestamp;
int status;
+ struct list_head child_list;
+ struct list_head active_list;
};
enum fence_flag_bits {
@@ -292,7 +294,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2)
if (WARN_ON(f1->context != f2->context))
return false;
- return f1->seqno - f2->seqno < INT_MAX;
+ return (int)(f1->seqno - f2->seqno) > 0;
}
/**
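
The fence_is_later() change above switches to a signed-difference test, the
same idiom as time_after() for jiffies. Two consequences follow directly
from the arithmetic: equal sequence numbers now compare as "not later"
((int)0 > 0 is false, whereas 0 < INT_MAX was true), and ordering still
works across u32 wraparound, e.g. seqno 0x00000002 vs 0xfffffffe gives
(int)0x00000004 = 4 > 0.
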
diff --git a/include/linux/frame.h b/include/linux/frame.h
new file mode 100644
index 000000000000..e6baaba3f1ae
--- /dev/null
+++ b/include/linux/frame.h
@@ -0,0 +1,23 @@
+#ifndef _LINUX_FRAME_H
+#define _LINUX_FRAME_H
+
+#ifdef CONFIG_STACK_VALIDATION
+/*
+ * This macro marks the given function's stack frame as "non-standard", which
+ * tells objtool to ignore the function when doing stack metadata validation.
+ * It should only be used in special cases where you're 100% sure it won't
+ * affect the reliability of frame pointers and kernel stack traces.
+ *
+ * For more information, see tools/objtool/Documentation/stack-validation.txt.
+ */
+#define STACK_FRAME_NON_STANDARD(func) \
+ static void __used __section(__func_stack_frame_non_standard) \
+ *__func_stack_frame_non_standard_##func = func
+
+#else /* !CONFIG_STACK_VALIDATION */
+
+#define STACK_FRAME_NON_STANDARD(func)
+
+#endif /* CONFIG_STACK_VALIDATION */
+
+#endif /* _LINUX_FRAME_H */
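
Typical use of the new annotation, sketched with an illustrative function
name (only the macro itself comes from this patch):

	#include <linux/frame.h>

	static void foo_trampoline(void)
	{
		/* hand-written asm / unusual stack usage that objtool
		 * cannot validate goes here
		 */
	}
	STACK_FRAME_NON_STANDARD(foo_trampoline);
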
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 6b7fd9cf5ea2..dd03e837ebb7 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -231,7 +231,7 @@ static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
* call this with locks held.
*/
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
- unsigned long delta, const enum hrtimer_mode mode)
+ u64 delta, const enum hrtimer_mode mode)
{
int __retval;
freezer_do_not_count();
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ae681002100a..14a97194b34b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -53,6 +53,8 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
+struct fscrypt_info;
+struct fscrypt_operations;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -70,7 +72,7 @@ extern int sysctl_protected_hardlinks;
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
-typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
@@ -320,6 +322,7 @@ struct writeback_control;
#define IOCB_EVENTFD (1 << 0)
#define IOCB_APPEND (1 << 1)
#define IOCB_DIRECT (1 << 2)
+#define IOCB_HIPRI (1 << 3)
struct kiocb {
struct file *ki_filp;
@@ -678,6 +681,10 @@ struct inode {
struct hlist_head i_fsnotify_marks;
#endif
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ struct fscrypt_info *i_crypt_info;
+#endif
+
void *i_private; /* fs or device private pointer */
};
@@ -1323,6 +1330,8 @@ struct super_block {
#endif
const struct xattr_handler **s_xattr;
+ const struct fscrypt_operations *s_cop;
+
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
@@ -1540,11 +1549,6 @@ extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct de
extern int vfs_whiteout(struct inode *, struct dentry *);
/*
- * VFS dentry helper functions.
- */
-extern void dentry_unhash(struct dentry *dentry);
-
-/*
* VFS file helper functions.
*/
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
@@ -1709,9 +1713,9 @@ extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *)
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
- unsigned long, loff_t *);
+ unsigned long, loff_t *, int);
extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
- unsigned long, loff_t *);
+ unsigned long, loff_t *, int);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
@@ -2259,7 +2263,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
- const char *, int);
+ const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
@@ -2576,7 +2580,22 @@ static inline void i_readcount_inc(struct inode *inode)
#endif
extern int do_pipe_flags(int *, int);
+enum kernel_read_file_id {
+ READING_FIRMWARE = 1,
+ READING_MODULE,
+ READING_KEXEC_IMAGE,
+ READING_KEXEC_INITRAMFS,
+ READING_POLICY,
+ READING_MAX_ID
+};
+
extern int kernel_read(struct file *, loff_t, char *, unsigned long);
+extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
+ enum kernel_read_file_id);
+extern int kernel_read_file_from_path(char *, void **, loff_t *, loff_t,
+ enum kernel_read_file_id);
+extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
+ enum kernel_read_file_id);
extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
extern struct file * open_exec(const char *);
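
A sketch of how a caller might use the new kernel_read_file_from_path()
helper; the path, the READING_FIRMWARE id and the size cap are illustrative,
and the cap is assumed here to be an upper bound on the accepted file size:

	void *buf = NULL;
	loff_t size = 0;
	char path[] = "/lib/firmware/example.bin";
	int ret;

	ret = kernel_read_file_from_path(path, &buf, &size, INT_MAX,
					 READING_FIRMWARE);
	if (ret < 0)
		return ret;
	/* 'size' bytes are now available at 'buf'; the caller owns the
	 * buffer and must release it when done.
	 */
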
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
new file mode 100644
index 000000000000..cd91f75de49b
--- /dev/null
+++ b/include/linux/fscrypto.h
@@ -0,0 +1,434 @@
+/*
+ * General per-file encryption definition
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#ifndef _LINUX_FSCRYPTO_H
+#define _LINUX_FSCRYPTO_H
+
+#include <linux/key.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <crypto/skcipher.h>
+#include <uapi/linux/fs.h>
+
+#define FS_KEY_DERIVATION_NONCE_SIZE 16
+#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
+
+#define FS_POLICY_FLAGS_PAD_4 0x00
+#define FS_POLICY_FLAGS_PAD_8 0x01
+#define FS_POLICY_FLAGS_PAD_16 0x02
+#define FS_POLICY_FLAGS_PAD_32 0x03
+#define FS_POLICY_FLAGS_PAD_MASK 0x03
+#define FS_POLICY_FLAGS_VALID 0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID 0
+#define FS_ENCRYPTION_MODE_AES_256_XTS 1
+#define FS_ENCRYPTION_MODE_AES_256_GCM 2
+#define FS_ENCRYPTION_MODE_AES_256_CBC 3
+#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ * 1 byte: Protector format (1 = this version)
+ * 1 byte: File contents encryption mode
+ * 1 byte: File names encryption mode
+ * 1 byte: Flags
+ * 8 bytes: Master Key descriptor
+ * 16 bytes: Encryption Key derivation nonce
+ */
+struct fscrypt_context {
+ u8 format;
+ u8 contents_encryption_mode;
+ u8 filenames_encryption_mode;
+ u8 flags;
+ u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+} __packed;
+
+/* Encryption parameters */
+#define FS_XTS_TWEAK_SIZE 16
+#define FS_AES_128_ECB_KEY_SIZE 16
+#define FS_AES_256_GCM_KEY_SIZE 32
+#define FS_AES_256_CBC_KEY_SIZE 32
+#define FS_AES_256_CTS_KEY_SIZE 32
+#define FS_AES_256_XTS_KEY_SIZE 64
+#define FS_MAX_KEY_SIZE 64
+
+#define FS_KEY_DESC_PREFIX "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE 8
+
+/* This is passed in from userspace into the kernel keyring */
+struct fscrypt_key {
+ u32 mode;
+ u8 raw[FS_MAX_KEY_SIZE];
+ u32 size;
+} __packed;
+
+struct fscrypt_info {
+ u8 ci_data_mode;
+ u8 ci_filename_mode;
+ u8 ci_flags;
+ struct crypto_skcipher *ci_ctfm;
+ struct key *ci_keyring_key;
+ u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+};
+
+#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
+#define FS_WRITE_PATH_FL 0x00000002
+
+struct fscrypt_ctx {
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio;
+ struct work_struct work;
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ u8 flags; /* Flags */
+ u8 mode; /* Encryption mode for tfm */
+};
+
+struct fscrypt_completion_result {
+ struct completion completion;
+ int res;
+};
+
+#define DECLARE_FS_COMPLETION_RESULT(ecr) \
+ struct fscrypt_completion_result ecr = { \
+ COMPLETION_INITIALIZER((ecr).completion), 0 }
+
+static inline int fscrypt_key_size(int mode)
+{
+ switch (mode) {
+ case FS_ENCRYPTION_MODE_AES_256_XTS:
+ return FS_AES_256_XTS_KEY_SIZE;
+ case FS_ENCRYPTION_MODE_AES_256_GCM:
+ return FS_AES_256_GCM_KEY_SIZE;
+ case FS_ENCRYPTION_MODE_AES_256_CBC:
+ return FS_AES_256_CBC_KEY_SIZE;
+ case FS_ENCRYPTION_MODE_AES_256_CTS:
+ return FS_AES_256_CTS_KEY_SIZE;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+#define FS_FNAME_NUM_SCATTER_ENTRIES 4
+#define FS_CRYPTO_BLOCK_SIZE 16
+#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct fscrypt_symlink_data {
+ __le16 len;
+ char encrypted_path[1];
+} __packed;
+
+/**
+ * This function is used to calculate the disk space required to
+ * store a filename of length l in encrypted symlink format.
+ */
+static inline u32 fscrypt_symlink_data_len(u32 l)
+{
+ if (l < FS_CRYPTO_BLOCK_SIZE)
+ l = FS_CRYPTO_BLOCK_SIZE;
+ return (l + sizeof(struct fscrypt_symlink_data) - 1);
+}
+
+struct fscrypt_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct fscrypt_name {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ u32 hash;
+ u32 minor_hash;
+ struct fscrypt_str crypto_buf;
+};
+
+#define FSTR_INIT(n, l) { .name = n, .len = l }
+#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
+#define fname_name(p) ((p)->disk_name.name)
+#define fname_len(p) ((p)->disk_name.len)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+ int (*get_context)(struct inode *, void *, size_t);
+ int (*prepare_context)(struct inode *);
+ int (*set_context)(struct inode *, const void *, size_t, void *);
+ int (*dummy_context)(struct inode *);
+ bool (*is_encrypted)(struct inode *);
+ bool (*empty_dir)(struct inode *);
+ unsigned (*max_namelen)(struct inode *);
+};
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ if (inode->i_sb->s_cop->dummy_context &&
+ inode->i_sb->s_cop->dummy_context(inode))
+ return true;
+ return false;
+}
+
+static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
+{
+ return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
+}
+
+static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
+{
+ return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
+}
+
+static inline u32 fscrypt_validate_encryption_key_size(u32 mode, u32 size)
+{
+ if (size == fscrypt_key_size(mode))
+ return size;
+ return 0;
+}
+
+static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+{
+ if (str->len == 1 && str->name[0] == '.')
+ return true;
+
+ if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+ return true;
+
+ return false;
+}
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+#else
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+#endif
+}
+
+static inline int fscrypt_has_encryption_key(struct inode *inode)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return (inode->i_crypt_info != NULL);
+#else
+ return 0;
+#endif
+}
+
+static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+extern const struct dentry_operations fscrypt_d_ops;
+#endif
+
+static inline void fscrypt_set_d_op(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ d_set_d_op(dentry, &fscrypt_d_ops);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+/* crypto.c */
+extern struct kmem_cache *fscrypt_info_cachep;
+int fscrypt_initialize(void);
+
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *);
+extern void fscrypt_release_ctx(struct fscrypt_ctx *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *);
+extern int fscrypt_decrypt_page(struct page *);
+extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern void fscrypt_restore_control_page(struct page *);
+extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
+ unsigned int);
+/* policy.c */
+extern int fscrypt_process_policy(struct inode *,
+ const struct fscrypt_policy *);
+extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
+extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
+extern int fscrypt_inherit_context(struct inode *, struct inode *,
+ void *, bool);
+/* keyinfo.c */
+extern int get_crypt_info(struct inode *);
+extern int fscrypt_get_encryption_info(struct inode *);
+extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+
+/* fname.c */
+extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
+ int lookup, struct fscrypt_name *);
+extern void fscrypt_free_filename(struct fscrypt_name *);
+extern u32 fscrypt_fname_encrypted_size(struct inode *, u32);
+extern int fscrypt_fname_alloc_buffer(struct inode *, u32,
+ struct fscrypt_str *);
+extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
+extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
+ const struct fscrypt_str *, struct fscrypt_str *);
+extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
+ struct fscrypt_str *);
+#endif
+
+/* crypto.c */
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
+{
+ return;
+}
+
+static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
+ struct page *p)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_notsupp_decrypt_page(struct page *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c,
+ struct bio *b)
+{
+ return;
+}
+
+static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b)
+{
+ return;
+}
+
+static inline void fscrypt_notsupp_restore_control_page(struct page *p)
+{
+ return;
+}
+
+static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
+ sector_t s, unsigned int f)
+{
+ return -EOPNOTSUPP;
+}
+
+/* policy.c */
+static inline int fscrypt_notsupp_process_policy(struct inode *i,
+ const struct fscrypt_policy *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_get_policy(struct inode *i,
+ struct fscrypt_policy *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_has_permitted_context(struct inode *p,
+ struct inode *i)
+{
+ return 0;
+}
+
+static inline int fscrypt_notsupp_inherit_context(struct inode *p,
+ struct inode *i, void *v, bool b)
+{
+ return -EOPNOTSUPP;
+}
+
+/* keyinfo.c */
+static inline int fscrypt_notsupp_get_encryption_info(struct inode *i)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_put_encryption_info(struct inode *i,
+ struct fscrypt_info *f)
+{
+ return;
+}
+
+ /* fname.c */
+static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
+ const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ if (dir->i_sb->s_cop->is_encrypted(dir))
+ return -EOPNOTSUPP;
+
+ memset(fname, 0, sizeof(struct fscrypt_name));
+ fname->usr_fname = iname;
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+}
+
+static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname)
+{
+ return;
+}
+
+static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s)
+{
+ /* never happens */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode,
+ u32 ilen, struct fscrypt_str *crypto_str)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c)
+{
+ return;
+}
+
+static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode,
+ const struct qstr *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* _LINUX_FSCRYPTO_H */
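
For reference, fscrypt_symlink_data_len() above pads short names up to one
crypto block before adding the header overhead: with the __packed layout
shown, sizeof(struct fscrypt_symlink_data) is 3 (a __le16 length plus the
one-byte encrypted_path[1] placeholder), so a 5-byte name needs
16 + 3 - 1 = 18 bytes on disk, while a 40-byte name needs 40 + 3 - 1 = 42.
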
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 84d971ff3fba..649e9171a9b3 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -189,4 +189,109 @@ static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
#endif
+struct ccsr_rcpm_v1 {
+ u8 res0000[4];
+ __be32 cdozsr; /* 0x0004 Core Doze Status Register */
+ u8 res0008[4];
+ __be32 cdozcr; /* 0x000c Core Doze Control Register */
+ u8 res0010[4];
+ __be32 cnapsr; /* 0x0014 Core Nap Status Register */
+ u8 res0018[4];
+ __be32 cnapcr; /* 0x001c Core Nap Control Register */
+ u8 res0020[4];
+ __be32 cdozpsr; /* 0x0024 Core Doze Previous Status Register */
+ u8 res0028[4];
+ __be32 cnappsr; /* 0x002c Core Nap Previous Status Register */
+ u8 res0030[4];
+ __be32 cwaitsr; /* 0x0034 Core Wait Status Register */
+ u8 res0038[4];
+ __be32 cwdtdsr; /* 0x003c Core Watchdog Detect Status Register */
+ __be32 powmgtcsr; /* 0x0040 PM Control&Status Register */
+#define RCPM_POWMGTCSR_SLP 0x00020000
+ u8 res0044[12];
+ __be32 ippdexpcr; /* 0x0050 IP Powerdown Exception Control Register */
+ u8 res0054[16];
+ __be32 cpmimr; /* 0x0064 Core PM IRQ Mask Register */
+ u8 res0068[4];
+ __be32 cpmcimr; /* 0x006c Core PM Critical IRQ Mask Register */
+ u8 res0070[4];
+ __be32 cpmmcmr; /* 0x0074 Core PM Machine Check Mask Register */
+ u8 res0078[4];
+ __be32 cpmnmimr; /* 0x007c Core PM NMI Mask Register */
+ u8 res0080[4];
+ __be32 ctbenr; /* 0x0084 Core Time Base Enable Register */
+ u8 res0088[4];
+ __be32 ctbckselr; /* 0x008c Core Time Base Clock Select Register */
+ u8 res0090[4];
+ __be32 ctbhltcr; /* 0x0094 Core Time Base Halt Control Register */
+ u8 res0098[4];
+ __be32 cmcpmaskcr; /* 0x00a4 Core Machine Check Mask Register */
+};
+
+struct ccsr_rcpm_v2 {
+ u8 res_00[12];
+ __be32 tph10sr0; /* Thread PH10 Status Register */
+ u8 res_10[12];
+ __be32 tph10setr0; /* Thread PH10 Set Control Register */
+ u8 res_20[12];
+ __be32 tph10clrr0; /* Thread PH10 Clear Control Register */
+ u8 res_30[12];
+ __be32 tph10psr0; /* Thread PH10 Previous Status Register */
+ u8 res_40[12];
+ __be32 twaitsr0; /* Thread Wait Status Register */
+ u8 res_50[96];
+ __be32 pcph15sr; /* Physical Core PH15 Status Register */
+ __be32 pcph15setr; /* Physical Core PH15 Set Control Register */
+ __be32 pcph15clrr; /* Physical Core PH15 Clear Control Register */
+ __be32 pcph15psr; /* Physical Core PH15 Prev Status Register */
+ u8 res_c0[16];
+ __be32 pcph20sr; /* Physical Core PH20 Status Register */
+ __be32 pcph20setr; /* Physical Core PH20 Set Control Register */
+ __be32 pcph20clrr; /* Physical Core PH20 Clear Control Register */
+ __be32 pcph20psr; /* Physical Core PH20 Prev Status Register */
+ __be32 pcpw20sr; /* Physical Core PW20 Status Register */
+ u8 res_e0[12];
+ __be32 pcph30sr; /* Physical Core PH30 Status Register */
+ __be32 pcph30setr; /* Physical Core PH30 Set Control Register */
+ __be32 pcph30clrr; /* Physical Core PH30 Clear Control Register */
+ __be32 pcph30psr; /* Physical Core PH30 Prev Status Register */
+ u8 res_100[32];
+ __be32 ippwrgatecr; /* IP Power Gating Control Register */
+ u8 res_124[12];
+ __be32 powmgtcsr; /* Power Management Control & Status Reg */
+#define RCPM_POWMGTCSR_LPM20_RQ 0x00100000
+#define RCPM_POWMGTCSR_LPM20_ST 0x00000200
+#define RCPM_POWMGTCSR_P_LPM20_ST 0x00000100
+ u8 res_134[12];
+ __be32 ippdexpcr[4]; /* IP Powerdown Exception Control Reg */
+ u8 res_150[12];
+ __be32 tpmimr0; /* Thread PM Interrupt Mask Reg */
+ u8 res_160[12];
+ __be32 tpmcimr0; /* Thread PM Crit Interrupt Mask Reg */
+ u8 res_170[12];
+ __be32 tpmmcmr0; /* Thread PM Machine Check Interrupt Mask Reg */
+ u8 res_180[12];
+ __be32 tpmnmimr0; /* Thread PM NMI Mask Reg */
+ u8 res_190[12];
+ __be32 tmcpmaskcr0; /* Thread Machine Check Mask Control Reg */
+ __be32 pctbenr; /* Physical Core Time Base Enable Reg */
+ __be32 pctbclkselr; /* Physical Core Time Base Clock Select */
+ __be32 tbclkdivr; /* Time Base Clock Divider Register */
+ u8 res_1ac[4];
+ __be32 ttbhltcr[4]; /* Thread Time Base Halt Control Register */
+ __be32 clpcl10sr; /* Cluster PCL10 Status Register */
+ __be32 clpcl10setr; /* Cluster PCL30 Set Control Register */
+ __be32 clpcl10clrr; /* Cluster PCL30 Clear Control Register */
+ __be32 clpcl10psr; /* Cluster PCL30 Prev Status Register */
+ __be32 cddslpsetr; /* Core Domain Deep Sleep Set Register */
+ __be32 cddslpclrr; /* Core Domain Deep Sleep Clear Register */
+ __be32 cdpwroksetr; /* Core Domain Power OK Set Register */
+ __be32 cdpwrokclrr; /* Core Domain Power OK Clear Register */
+ __be32 cdpwrensr; /* Core Domain Power Enable Status Register */
+ __be32 cddslsr; /* Core Domain Deep Sleep Status Register */
+ u8 res_1e8[8];
+ __be32 dslpcntcr[8]; /* Deep Sleep Counter Cfg Register */
+ u8 res_300[3568];
+};
+
#endif
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 7ee1774edee5..0141f257d67b 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -16,15 +16,6 @@
#include <linux/slab.h>
#include <linux/bug.h>
-/*
- * fsnotify_d_instantiate - instantiate a dentry for inode
- */
-static inline void fsnotify_d_instantiate(struct dentry *dentry,
- struct inode *inode)
-{
- __fsnotify_d_instantiate(dentry, inode);
-}
-
/* Notify this dentry's parent about a child's events. */
static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 533c4408529a..1259e53d9296 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -290,14 +290,9 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
/*
* fsnotify_d_instantiate - instantiate a dentry for inode
*/
-static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
+static inline void __fsnotify_d_instantiate(struct dentry *dentry)
{
- if (!inode)
- return;
-
- spin_lock(&dentry->d_lock);
__fsnotify_update_dcache_flags(dentry);
- spin_unlock(&dentry->d_lock);
}
/* called from fsnotify listeners, such as fanotify or dnotify */
@@ -396,7 +391,7 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
{}
-static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
+static inline void __fsnotify_d_instantiate(struct dentry *dentry)
{}
static inline u32 fsnotify_get_cookie(void)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index c2b340e23f62..dea12a6e413b 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -713,6 +713,18 @@ static inline void __ftrace_enabled_restore(int enabled)
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+static inline unsigned long get_lock_parent_ip(void)
+{
+ unsigned long addr = CALLER_ADDR0;
+
+ if (!in_lock_functions(addr))
+ return addr;
+ addr = CALLER_ADDR1;
+ if (!in_lock_functions(addr))
+ return addr;
+ return CALLER_ADDR2;
+}
+
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
@@ -799,16 +811,6 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
*/
#define __notrace_funcgraph notrace
-/*
- * We want to which function is an entrypoint of a hardirq.
- * That will help us to put a signal on output.
- */
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-
-/* Limits of hardirq entrypoints */
-extern char __irqentry_text_start[];
-extern char __irqentry_text_end[];
-
#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
@@ -845,7 +847,6 @@ static inline void unpause_graph_tracing(void)
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
#define __notrace_funcgraph
-#define __irq_entry
#define INIT_FTRACE_GRAPH
static inline void ftrace_graph_init_task(struct task_struct *t) { }
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index af1f2b24bbe4..570383a41853 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -9,6 +9,11 @@
struct vm_area_struct;
+/*
+ * In case of changes, please don't forget to update
+ * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+ */
+
/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA 0x01u
#define ___GFP_HIGHMEM 0x02u
@@ -48,7 +53,6 @@ struct vm_area_struct;
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
-#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
@@ -101,8 +105,6 @@ struct vm_area_struct;
*
* __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
* This takes precedence over the __GFP_MEMALLOC flag if both are set.
- *
- * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
*/
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
@@ -255,7 +257,7 @@ struct vm_area_struct;
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
- ~__GFP_KSWAPD_RECLAIM)
+ ~__GFP_RECLAIM)
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -329,22 +331,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
*
- * ZONES_SHIFT must be <= 2 on 32 bit platforms.
+ * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
*/
-#if 16 * ZONES_SHIFT > BITS_PER_LONG
-#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
+#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
+/* ZONE_DEVICE is not a valid GFP zone specifier */
+#define GFP_ZONES_SHIFT 2
+#else
+#define GFP_ZONES_SHIFT ZONES_SHIFT
+#endif
+
+#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
+#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif
#define GFP_ZONE_TABLE ( \
- (ZONE_NORMAL << 0 * ZONES_SHIFT) \
- | (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \
- | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \
- | (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \
- | (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \
- | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \
- | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \
- | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \
+ (ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \
+ | (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \
+ | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \
+ | (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \
+ | (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \
+ | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \
+ | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
+ | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)
/*
@@ -369,8 +378,8 @@ static inline enum zone_type gfp_zone(gfp_t flags)
enum zone_type z;
int bit = (__force int) (flags & GFP_ZONEMASK);
- z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
- ((1 << ZONES_SHIFT) - 1);
+ z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
+ ((1 << GFP_ZONES_SHIFT) - 1);
VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
return z;
}
@@ -515,13 +524,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void page_alloc_init_late(void);
-#else
-static inline void page_alloc_init_late(void)
-{
-}
-#endif
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
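
A worked example of the GFP_ZONE_TABLE lookup, taking GFP_ZONES_SHIFT = 2 as
in the CONFIG_ZONE_DEVICE case above: for a request with only __GFP_HIGHMEM
set, bit = ___GFP_HIGHMEM = 0x02, so gfp_zone() reads the table at
bit * GFP_ZONES_SHIFT = 4 and masks with (1 << GFP_ZONES_SHIFT) - 1 = 3,
which extracts the OPT_ZONE_HIGHMEM entry placed there by the
(OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) term above.
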
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 82fda487453f..bee976f82788 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -1,6 +1,7 @@
#ifndef __LINUX_GPIO_DRIVER_H
#define __LINUX_GPIO_DRIVER_H
+#include <linux/device.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/irq.h>
@@ -10,22 +11,21 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/kconfig.h>
-struct device;
struct gpio_desc;
struct of_phandle_args;
struct device_node;
struct seq_file;
+struct gpio_device;
#ifdef CONFIG_GPIOLIB
/**
* struct gpio_chip - abstract a GPIO controller
- * @label: for diagnostics
+ * @label: a functional name for the GPIO device, such as a part
+ * number or the name of the SoC IP-block implementing it.
+ * @gpiodev: the internal state holder, opaque struct
* @parent: optional parent device providing the GPIOs
- * @cdev: class device used by sysfs interface (may be NULL)
* @owner: helps prevent removal of modules exporting active GPIOs
- * @data: per-instance data assigned by the driver
- * @list: links gpio_chips together for traversal
* @request: optional hook for chip-specific activation, such as
* enabling module power and clock; may sleep
* @free: optional hook for chip-specific deactivation, such as
@@ -52,7 +52,6 @@ struct seq_file;
* get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
- * @desc: array of ngpio descriptors. Private.
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
@@ -107,11 +106,9 @@ struct seq_file;
*/
struct gpio_chip {
const char *label;
+ struct gpio_device *gpiodev;
struct device *parent;
- struct device *cdev;
struct module *owner;
- void *data;
- struct list_head list;
int (*request)(struct gpio_chip *chip,
unsigned offset);
@@ -141,7 +138,6 @@ struct gpio_chip {
struct gpio_chip *chip);
int base;
u16 ngpio;
- struct gpio_desc *desc;
const char *const *names;
bool can_sleep;
bool irq_not_threaded;
@@ -184,15 +180,6 @@ struct gpio_chip {
int (*of_xlate)(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags);
#endif
-#ifdef CONFIG_PINCTRL
- /*
- * If CONFIG_PINCTRL is enabled, then gpio controllers can optionally
- * describe the actual pin range which they serve in an SoC. This
- * information would be used by pinctrl subsystem to configure
- * corresponding pins for gpio usage.
- */
- struct list_head pin_ranges;
-#endif
};
extern const char *gpiochip_is_requested(struct gpio_chip *chip,
@@ -205,18 +192,24 @@ static inline int gpiochip_add(struct gpio_chip *chip)
return gpiochip_add_data(chip, NULL);
}
extern void gpiochip_remove(struct gpio_chip *chip);
+extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
+ void *data);
+extern void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip);
+
extern struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip, void *data));
/* lock/unlock as IRQ */
int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
+
+/* Line status inquiry for drivers */
+bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset);
/* get driver data */
-static inline void *gpiochip_get_data(struct gpio_chip *chip)
-{
- return chip->data;
-}
+void *gpiochip_get_data(struct gpio_chip *chip);
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
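
The new devm_gpiochip_add_data() pairs naturally with the opaque driver data
now retrieved through gpiochip_get_data(). A hedged probe() sketch; the
foo_gpio structure and driver name are illustrative only:

	#include <linux/gpio/driver.h>
	#include <linux/platform_device.h>

	struct foo_gpio {
		struct gpio_chip chip;
		/* device-private state */
	};

	static int foo_gpio_probe(struct platform_device *pdev)
	{
		struct foo_gpio *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->chip.label = dev_name(&pdev->dev);
		priv->chip.parent = &pdev->dev;
		/* ... set get/set/direction callbacks and ngpio here ... */

		/* unregistered automatically when the driver detaches */
		return devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
	}

Inside the chip callbacks, priv is recovered with gpiochip_get_data(chip),
matching the accessor declared above.
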
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2ead22dd74a0..c98c6539e2c2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -220,7 +220,7 @@ static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time
timer->node.expires = ktime_add_safe(time, delta);
}
-static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
{
timer->_softexpires = time;
timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
@@ -378,7 +378,7 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
/* Basic timer operations: */
extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- unsigned long range_ns, const enum hrtimer_mode mode);
+ u64 range_ns, const enum hrtimer_mode mode);
/**
* hrtimer_start - (re)start an hrtimer on the current CPU
@@ -399,7 +399,7 @@ extern int hrtimer_try_to_cancel(struct hrtimer *timer);
static inline void hrtimer_start_expires(struct hrtimer *timer,
enum hrtimer_mode mode)
{
- unsigned long delta;
+ u64 delta;
ktime_t soft, hard;
soft = hrtimer_get_softexpires(timer);
hard = hrtimer_get_expires(timer);
@@ -477,10 +477,12 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
struct task_struct *tsk);
-extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
- unsigned long delta, const enum hrtimer_mode mode, int clock);
+ u64 delta,
+ const enum hrtimer_mode mode,
+ int clock);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 459fd25b378e..79b0ef6aaa14 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -41,7 +41,8 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
- TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
+ TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
+ TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
@@ -71,12 +72,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
((__vma)->vm_flags & VM_HUGEPAGE))) && \
!((__vma)->vm_flags & VM_NOHUGEPAGE) && \
!is_vma_temporary_stack(__vma))
-#define transparent_hugepage_defrag(__vma) \
- ((transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \
- (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
@@ -101,19 +96,21 @@ static inline int split_huge_page(struct page *page)
void deferred_split_huge_page(struct page *page);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address);
+ unsigned long address, bool freeze);
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
pmd_t *____pmd = (__pmd); \
if (pmd_trans_huge(*____pmd) \
|| pmd_devmap(*____pmd)) \
- __split_huge_pmd(__vma, __pmd, __address); \
+ __split_huge_pmd(__vma, __pmd, __address, \
+ false); \
} while (0)
-#if HPAGE_PMD_ORDER >= MAX_ORDER
-#error "hugepages can't be allocated by the buddy allocator"
-#endif
+
+void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
+ bool freeze, struct page *page);
+
extern int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
@@ -178,6 +175,10 @@ static inline int split_huge_page(struct page *page)
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
+
+static inline void split_huge_pmd_address(struct vm_area_struct *vma,
+ unsigned long address, bool freeze, struct page *page) {}
+
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 753dbad0bf94..aa0fadce9308 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -235,6 +235,7 @@ struct vmbus_channel_offer {
#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
#define VMBUS_CHANNEL_PARENT_OFFER 0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
+#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
struct vmpacket_descriptor {
u16 type;
@@ -391,6 +392,10 @@ enum vmbus_channel_message_type {
CHANNELMSG_VERSION_RESPONSE = 15,
CHANNELMSG_UNLOAD = 16,
CHANNELMSG_UNLOAD_RESPONSE = 17,
+ CHANNELMSG_18 = 18,
+ CHANNELMSG_19 = 19,
+ CHANNELMSG_20 = 20,
+ CHANNELMSG_TL_CONNECT_REQUEST = 21,
CHANNELMSG_COUNT
};
@@ -561,6 +566,13 @@ struct vmbus_channel_initiate_contact {
u64 monitor_page2;
} __packed;
+/* Hyper-V socket: guest's connect()-ing to host */
+struct vmbus_channel_tl_connect_request {
+ struct vmbus_channel_message_header header;
+ uuid_le guest_endpoint_id;
+ uuid_le host_service_id;
+} __packed;
+
struct vmbus_channel_version_response {
struct vmbus_channel_message_header header;
u8 version_supported;
@@ -633,6 +645,32 @@ enum hv_signal_policy {
HV_SIGNAL_POLICY_EXPLICIT,
};
+enum vmbus_device_type {
+ HV_IDE = 0,
+ HV_SCSI,
+ HV_FC,
+ HV_NIC,
+ HV_ND,
+ HV_PCIE,
+ HV_FB,
+ HV_KBD,
+ HV_MOUSE,
+ HV_KVP,
+ HV_TS,
+ HV_HB,
+ HV_SHUTDOWN,
+ HV_FCOPY,
+ HV_BACKUP,
+ HV_DM,
+ HV_UNKOWN,
+};
+
+struct vmbus_device {
+ u16 dev_type;
+ uuid_le guid;
+ bool perf_device;
+};
+
struct vmbus_channel {
/* Unique channel id */
int id;
@@ -728,6 +766,12 @@ struct vmbus_channel {
void (*sc_creation_callback)(struct vmbus_channel *new_sc);
/*
+ * Channel rescind callback. Some channels (the hvsock ones), need to
+ * register a callback which is invoked in vmbus_onoffer_rescind().
+ */
+ void (*chn_rescind_callback)(struct vmbus_channel *channel);
+
+ /*
* The spinlock to protect the structure. It is being used to protect
* test-and-set access to various attributes of the structure as well
* as all sc_list operations.
@@ -767,8 +811,30 @@ struct vmbus_channel {
* signaling control.
*/
enum hv_signal_policy signal_policy;
+ /*
+ * On the channel send side, many of the VMBUS
+	 * device drivers explicitly serialize access to the
+	 * outgoing ring buffer. Give more control to the
+	 * VMBUS device drivers in terms of how to serialize
+	 * access to the outgoing ring buffer.
+	 * The default is to acquire the ring lock, which
+	 * preserves the current behavior.
+ */
+ bool acquire_ring_lock;
+
};
+static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
+{
+ c->acquire_ring_lock = state;
+}
+
+static inline bool is_hvsock_channel(const struct vmbus_channel *c)
+{
+ return !!(c->offermsg.offer.chn_flags &
+ VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+}
+
static inline void set_channel_signal_state(struct vmbus_channel *c,
enum hv_signal_policy policy)
{
@@ -790,6 +856,12 @@ static inline void *get_per_channel_state(struct vmbus_channel *c)
return c->per_channel_state;
}
+static inline void set_channel_pending_send_size(struct vmbus_channel *c,
+ u32 size)
+{
+ c->outbound.ring_buffer->pending_send_sz = size;
+}
+
void vmbus_onmessage(void *context);
int vmbus_request_offers(void);
@@ -801,6 +873,9 @@ int vmbus_request_offers(void);
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void (*sc_cr_cb)(struct vmbus_channel *new_sc));
+void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
+ void (*chn_rescind_cb)(struct vmbus_channel *));
+
/*
* Retrieve the (sub) channel on which to send an outgoing request.
* When a primary channel has multiple sub-channels, we choose a
@@ -940,6 +1015,20 @@ extern void vmbus_ontimer(unsigned long data);
struct hv_driver {
const char *name;
+ /*
+ * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
+ * channel flag, actually doesn't mean a synthetic device because the
+ * offer's if_type/if_instance can change for every new hvsock
+ * connection.
+ *
+ * However, to facilitate the notification of new-offer/rescind-offer
+ * from vmbus driver to hvsock driver, we can handle hvsock offer as
+ * a special vmbus device, and hence we need the below flag to
+ * indicate if the driver is the hvsock driver or not: we need to
+	 * specially treat the hvsock offer & driver in vmbus_match().
+ */
+ bool hvsock;
+
/* the device type supported by this driver */
uuid_le dev_type;
const struct hv_vmbus_device_id *id_table;
@@ -959,6 +1048,8 @@ struct hv_device {
/* the device instance id of this device */
uuid_le dev_instance;
+ u16 vendor_id;
+ u16 device_id;
struct device device;
@@ -994,6 +1085,8 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
+void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
+
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t min, resource_size_t max,
resource_size_t size, resource_size_t align,
@@ -1158,6 +1251,7 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
struct hv_util_service {
u8 *recv_buffer;
+ void *channel;
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
void (*util_deinit)(void);
@@ -1242,4 +1336,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
extern __u32 vmbus_proto_version;
+int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
+ const uuid_le *shv_host_servie_id);
#endif /* _HYPERV_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 452c0b0d2f32..3b1f6cef9513 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -163,6 +163,14 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
#define IEEE80211_MAX_FRAME_LEN 2352
+/* Maximal size of an A-MSDU */
+#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
+#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
+
+#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
+#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
+#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
+
#define IEEE80211_MAX_SSID_LEN 32
#define IEEE80211_MAX_MESH_ID_LEN 32
@@ -843,6 +851,8 @@ enum ieee80211_vht_opmode_bits {
};
#define WLAN_SA_QUERY_TR_ID_LEN 2
+#define WLAN_MEMBERSHIP_LEN 8
+#define WLAN_USER_POSITION_LEN 16
/**
* struct ieee80211_tpc_report_ie
@@ -991,6 +1001,11 @@ struct ieee80211_mgmt {
} __packed vht_opmode_notif;
struct {
u8 action_code;
+ u8 membership[WLAN_MEMBERSHIP_LEN];
+ u8 position[WLAN_USER_POSITION_LEN];
+ } __packed vht_group_notif;
+ struct {
+ u8 action_code;
u8 dialog_token;
u8 tpc_elem_id;
u8 tpc_elem_length;
@@ -1498,6 +1513,7 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
+#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
@@ -2079,6 +2095,16 @@ enum ieee80211_tdls_actioncode {
#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
+/* Defines the maximal number of MSDUs in an A-MSDU. */
+#define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB BIT(7)
+#define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB BIT(0)
+
+/*
+ * Fine Timing Measurement Initiator - bit 71 of @WLAN_EID_EXT_CAPABILITY
+ * information element
+ */
+#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index a338a688ee4a..dcb89e3515db 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -46,10 +46,6 @@ struct br_ip_list {
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
-/* values as per ieee8021QBridgeFdbAgingTime */
-#define BR_MIN_AGEING_TIME (10 * HZ)
-#define BR_MAX_AGEING_TIME (1000000 * HZ)
-
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index b84e49c3a738..174f43f43aff 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -24,6 +24,7 @@ struct team_pcpu_stats {
struct u64_stats_sync syncp;
u32 rx_dropped;
u32 tx_dropped;
+ u32 rx_nohandler;
};
struct team;
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 9c9de11549a7..12f6fba6d21a 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -37,11 +37,6 @@ static inline struct igmpv3_query *
return (struct igmpv3_query *)skb_transport_header(skb);
}
-extern int sysctl_igmp_llm_reports;
-extern int sysctl_igmp_max_memberships;
-extern int sysctl_igmp_max_msf;
-extern int sysctl_igmp_qrv;
-
struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 2fe939c73cd2..6670c3d25c58 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -119,6 +119,8 @@ struct st_sensor_bdu {
* @addr: address of the register.
* @mask_int1: mask to enable/disable IRQ on INT1 pin.
* @mask_int2: mask to enable/disable IRQ on INT2 pin.
+ * @addr_ihl: address to enable/disable active low on the INT lines.
+ * @mask_ihl: mask to enable/disable active low on the INT lines.
* struct ig1 - represents the Interrupt Generator 1 of sensors.
* @en_addr: address of the enable ig1 register.
* @en_mask: mask to write the on/off value for enable.
@@ -127,6 +129,8 @@ struct st_sensor_data_ready_irq {
u8 addr;
u8 mask_int1;
u8 mask_int2;
+ u8 addr_ihl;
+ u8 mask_ihl;
struct {
u8 en_addr;
u8 en_mask;
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b5894118755f..b2b16772c651 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -180,18 +180,18 @@ struct iio_event_spec {
* @address: Driver specific identifier.
* @scan_index: Monotonic index to give ordering in scans when read
* from a buffer.
- * @scan_type: Sign: 's' or 'u' to specify signed or unsigned
+ * @scan_type: sign: 's' or 'u' to specify signed or unsigned
* realbits: Number of valid bits of data
- * storage_bits: Realbits + padding
+ * storagebits: Realbits + padding
* shift: Shift right by this before masking out
* realbits.
- * endianness: little or big endian
* repeat: Number of times real/storage bits
* repeats. When the repeat element is
* more than 1, then the type element in
* sysfs will show a repeat value.
* Otherwise, the number of repetitions is
* omitted.
+ * endianness: little or big endian
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
* @info_mask_shared_by_type: What information is to be exported that is shared
@@ -448,7 +448,7 @@ struct iio_buffer_setup_ops {
* @buffer: [DRIVER] any buffer present
* @buffer_list: [INTERN] list of all buffers currently attached
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @mlock: [INTERN] lock used to prevent simultaneous device state
+ * @mlock: [DRIVER] lock used to prevent simultaneous device state
* changes
* @available_scan_masks: [DRIVER] optional array of allowed bitmasks
* @masklength: [INTERN] the length of the mask established from
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 120ccc53fcb7..e6516cbbe9bf 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -18,8 +18,9 @@ extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_file_check(struct file *file, int mask, int opened);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
-extern int ima_module_check(struct file *file);
-extern int ima_fw_from_file(struct file *file, char *buf, size_t size);
+extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
+extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
+ enum kernel_read_file_id id);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -42,12 +43,13 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
return 0;
}
-static inline int ima_module_check(struct file *file)
+static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
{
return 0;
}
-static inline int ima_fw_from_file(struct file *file, char *buf, size_t size)
+static inline int ima_post_read_file(struct file *file, void *buf, loff_t size,
+ enum kernel_read_file_id id)
{
return 0;
}
diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h
deleted file mode 100644
index 9a715cfa1fe3..000000000000
--- a/include/linux/inet_lro.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * linux/include/linux/inet_lro.h
- *
- * Large Receive Offload (ipv4 / tcp)
- *
- * (C) Copyright IBM Corp. 2007
- *
- * Authors:
- * Jan-Bernd Themann <themann@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __INET_LRO_H_
-#define __INET_LRO_H_
-
-#include <net/ip.h>
-#include <net/tcp.h>
-
-/*
- * LRO statistics
- */
-
-struct net_lro_stats {
- unsigned long aggregated;
- unsigned long flushed;
- unsigned long no_desc;
-};
-
-/*
- * LRO descriptor for a tcp session
- */
-struct net_lro_desc {
- struct sk_buff *parent;
- struct sk_buff *last_skb;
- struct skb_frag_struct *next_frag;
- struct iphdr *iph;
- struct tcphdr *tcph;
- __wsum data_csum;
- __be32 tcp_rcv_tsecr;
- __be32 tcp_rcv_tsval;
- __be32 tcp_ack;
- u32 tcp_next_seq;
- u32 skb_tot_frags_len;
- u16 ip_tot_len;
- u16 tcp_saw_tstamp; /* timestamps enabled */
- __be16 tcp_window;
- int pkt_aggr_cnt; /* counts aggregated packets */
- int vlan_packet;
- int mss;
- int active;
-};
-
-/*
- * Large Receive Offload (LRO) Manager
- *
- * Fields must be set by driver
- */
-
-struct net_lro_mgr {
- struct net_device *dev;
- struct net_lro_stats stats;
-
- /* LRO features */
- unsigned long features;
-#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */
-#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted
- from received packets and eth protocol
- is still ETH_P_8021Q */
-
- /*
- * Set for generated SKBs that are not added to
- * the frag list in fragmented mode
- */
- u32 ip_summed;
- u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
- * or CHECKSUM_NONE */
-
- int max_desc; /* Max number of LRO descriptors */
- int max_aggr; /* Max number of LRO packets to be aggregated */
-
- int frag_align_pad; /* Padding required to properly align layer 3
- * headers in generated skb when using frags */
-
- struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
-
- /*
- * Optimized driver functions
- *
- * get_skb_header: returns tcp and ip header for packet in SKB
- */
- int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
- void **tcpudp_hdr, u64 *hdr_flags, void *priv);
-
- /* hdr_flags: */
-#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
-#define LRO_TCP 2 /* tcpudp_hdr is TCP header */
-
- /*
- * get_frag_header: returns mac, tcp and ip header for packet in SKB
- *
- * @hdr_flags: Indicate what kind of LRO has to be done
- * (IPv4/IPv6/TCP/UDP)
- */
- int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
- void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
- void *priv);
-};
-
-/*
- * Processes a SKB
- *
- * @lro_mgr: LRO manager to use
- * @skb: SKB to aggregate
- * @priv: Private data that may be used by driver functions
- * (for example get_tcp_ip_hdr)
- */
-
-void lro_receive_skb(struct net_lro_mgr *lro_mgr,
- struct sk_buff *skb,
- void *priv);
-/*
- * Forward all aggregated SKBs held by lro_mgr to network stack
- */
-
-void lro_flush_all(struct net_lro_mgr *lro_mgr);
-
-#endif
diff --git a/include/linux/init.h b/include/linux/init.h
index b449f378f995..aedb254abc37 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -142,6 +142,10 @@ void prepare_namespace(void);
void __init load_default_modules(void);
int __init init_rootfs(void);
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
extern void (*late_time_init)(void);
extern bool initcall_debug;
diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h
index 5af7c66f1fca..586c8c95dcb0 100644
--- a/include/linux/input/cyttsp.h
+++ b/include/linux/input/cyttsp.h
@@ -40,19 +40,4 @@
/* Active distance in pixels for a gesture to be reported */
#define CY_ACT_DIST_DFLT 0xF8 /* pixels */
-struct cyttsp_platform_data {
- u32 maxx;
- u32 maxy;
- bool use_hndshk;
- u8 act_dist; /* Active distance */
- u8 act_intrvl; /* Active refresh interval; ms */
- u8 tch_tmout; /* Active touch timeout; ms */
- u8 lp_intrvl; /* Low power refresh interval; ms */
- int (*init)(void);
- void (*exit)(void);
- char *name;
- s16 irq_gpio;
- u8 *bl_keys;
-};
-
#endif /* _CYTTSP_H_ */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0e95fcc75b2a..9fcabeb07787 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -125,6 +125,16 @@ struct irqaction {
extern irqreturn_t no_action(int cpl, void *dev_id);
+/*
+ * If a (PCI) device interrupt is not connected we set dev->irq to
+ * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
+ * can distinguish that case from other error returns.
+ *
+ * 0x80000000 is guaranteed to be outside the available range of interrupts
+ * and easy to distinguish from other possible incorrect values.
+ */
+#define IRQ_NOTCONNECTED (1U << 31)
+
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn,
@@ -673,4 +683,24 @@ extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+/*
+ * We want to know which function is an entrypoint of a hardirq or a softirq.
+ */
+#define __irq_entry __attribute__((__section__(".irqentry.text")))
+#define __softirq_entry \
+ __attribute__((__section__(".softirqentry.text")))
+
+/* Limits of hardirq entrypoints */
+extern char __irqentry_text_start[];
+extern char __irqentry_text_end[];
+/* Limits of softirq entrypoints */
+extern char __softirqentry_text_start[];
+extern char __softirqentry_text_end[];
+
+#else
+#define __irq_entry
+#define __softirq_entry
+#endif
+
#endif
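A hedged sketch of how a PCI driver might react to the new IRQ_NOTCONNECTED marker: when the core has flagged the interrupt as not connected, request_irq() fails with -ENOTCONN and the driver can fall back cleanly. The device, handler and name below are invented for illustration.

/* Illustrative only. */
static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request(struct pci_dev *pdev)
{
	int ret;

	/* If pdev->irq was set to IRQ_NOTCONNECTED, request_irq()
	 * returns -ENOTCONN rather than a generic error. */
	ret = request_irq(pdev->irq, example_handler, 0, "example", pdev);
	if (ret == -ENOTCONN) {
		dev_info(&pdev->dev, "no interrupt line, polling instead\n");
		return 0;
	}
	return ret;
}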
diff --git a/include/linux/io.h b/include/linux/io.h
index 32403b5716e5..e2c8419278c1 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -135,6 +135,7 @@ enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
MEMREMAP_WT = 1 << 1,
+ MEMREMAP_WC = 1 << 2,
};
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
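A small, assumed example of the new MEMREMAP_WC flag: requesting a write-combining mapping for a device buffer. The base address and size are placeholders.

/* Illustrative only: map a placeholder region write-combined. */
static void example_map_wc(void)
{
	resource_size_t base = 0xd0000000;	/* placeholder */
	void *vaddr;

	vaddr = memremap(base, SZ_4M, MEMREMAP_WC);
	if (!vaddr)
		return;
	/* ... stream data into the write-combined buffer ... */
	memunmap(vaddr);
}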
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 24bea087e7af..0b65543dc6cf 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -20,6 +20,7 @@ struct resource {
resource_size_t end;
const char *name;
unsigned long flags;
+ unsigned long desc;
struct resource *parent, *sibling, *child;
};
@@ -49,12 +50,19 @@ struct resource {
#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
+#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
+#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
+
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
+/* I/O resource extended types */
+#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM)
+
/* PnP IRQ specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
@@ -98,13 +106,27 @@ struct resource {
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
-#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
-#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
-#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
+#define IORESOURCE_ROM_SHADOW (1<<1) /* Use RAM image, not ROM BAR */
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
+/*
+ * I/O Resource Descriptors
+ *
+ * Descriptors are used by walk_iomem_res_desc() and region_intersects()
+ * for searching a specific resource range in the iomem table. Assign
+ * a new descriptor when a resource range supports the search interfaces.
+ * Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
+ */
+enum {
+ IORES_DESC_NONE = 0,
+ IORES_DESC_CRASH_KERNEL = 1,
+ IORES_DESC_ACPI_TABLES = 2,
+ IORES_DESC_ACPI_NV_STORAGE = 3,
+ IORES_DESC_PERSISTENT_MEMORY = 4,
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
+};
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
@@ -113,6 +135,7 @@ struct resource {
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
+ .desc = IORES_DESC_NONE, \
}
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
@@ -149,6 +172,7 @@ extern void reserve_region_with_split(struct resource *root,
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
+extern int remove_resource(struct resource *old);
extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
@@ -170,6 +194,10 @@ static inline unsigned long resource_type(const struct resource *res)
{
return res->flags & IORESOURCE_TYPE_BITS;
}
+static inline unsigned long resource_ext_type(const struct resource *res)
+{
+ return res->flags & IORESOURCE_EXT_TYPE_BITS;
+}
/* True iff r1 completely contains r2 */
static inline bool resource_contains(struct resource *r1, struct resource *r2)
{
@@ -239,8 +267,8 @@ extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(u64, u64, void *));
extern int
-walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg,
- int (*func)(u64, u64, void *));
+walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
+ void *arg, int (*func)(u64, u64, void *));
/* True if any part of r1 overlaps r2 */
static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
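A hedged sketch of the new descriptor mechanism: a resource is tagged with an IORES_DESC_* value when it is created and can later be found again with walk_iomem_res_desc(). The resource values and the callback are invented.

/* Illustrative only. */
static struct resource example_crash_res = {
	.name  = "Crash kernel",
	.start = 0,			/* placeholder range */
	.end   = 0,
	.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
	.desc  = IORES_DESC_CRASH_KERNEL,
};

static int example_count_range(u64 start, u64 end, void *arg)
{
	(*(int *)arg)++;
	return 0;
}

static int example_count_crash_ranges(void)
{
	int n = 0;

	/* Visit every iomem range tagged as crash-kernel memory. */
	walk_iomem_res_desc(IORES_DESC_CRASH_KERNEL, IORESOURCE_SYSTEM_RAM,
			    0, -1, &n, example_count_range);
	return n;
}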
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 402753bccafa..7edc14fb66b6 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -50,16 +50,19 @@ struct ipv6_devconf {
__s32 mc_forwarding;
#endif
__s32 disable_ipv6;
+ __s32 drop_unicast_in_l2_multicast;
__s32 accept_dad;
__s32 force_tllao;
__s32 ndisc_notify;
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
+ __s32 drop_unsolicited_na;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
} stable_secret;
__s32 use_oif_addrs_only;
+ __s32 keep_addr_on_down;
void *sysctl;
};
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3c1c96786248..c4de62348ff2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -133,17 +133,23 @@ struct irq_domain;
* Use accessor functions to deal with it
* @node: node index useful for balancing
* @handler_data: per-IRQ data for the irq_chip methods
- * @affinity: IRQ affinity on SMP
+ * @affinity: IRQ affinity on SMP. If this is an IPI
+ * related irq, then this is the mask of the
+ * CPUs to which an IPI can be sent.
* @msi_desc: MSI descriptor
+ * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional.
*/
struct irq_common_data {
- unsigned int state_use_accessors;
+ unsigned int __private state_use_accessors;
#ifdef CONFIG_NUMA
unsigned int node;
#endif
void *handler_data;
struct msi_desc *msi_desc;
cpumask_var_t affinity;
+#ifdef CONFIG_GENERIC_IRQ_IPI
+ unsigned int ipi_offset;
+#endif
};
/**
@@ -208,7 +214,7 @@ enum {
IRQD_FORWARDED_TO_VCPU = (1 << 20),
};
-#define __irqd_to_state(d) ((d)->common->state_use_accessors)
+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
@@ -299,6 +305,8 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}
+#undef __irqd_to_state
+
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
return d->hwirq;
@@ -341,6 +349,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_get_irqchip_state: return the internal state of an interrupt
 * @irq_set_irqchip_state:	set the internal state of an interrupt
* @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
+ * @ipi_send_single:	send a single IPI to a destination cpu
+ * @ipi_send_mask: send an IPI to destination cpus in cpumask
* @flags: chip specific flags
*/
struct irq_chip {
@@ -385,6 +395,9 @@ struct irq_chip {
int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
+ void (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
+ void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
+
unsigned long flags;
};
@@ -934,4 +947,12 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
return readl(gc->reg_base + reg_offset);
}
+/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
+#define INVALID_HWIRQ (~0UL)
+irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
+int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
+int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
+int ipi_send_single(unsigned int virq, unsigned int cpu);
+int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
+
#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index ce824db48d64..80f89e4a29ac 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -261,9 +261,6 @@ extern void gic_write_compare(cycle_t cnt);
extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
extern void gic_start_count(void);
extern void gic_stop_count(void);
-extern void gic_send_ipi(unsigned int intr);
-extern unsigned int plat_ipi_call_int_xlate(unsigned int);
-extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
extern int gic_get_c0_compare_int(void);
extern int gic_get_c0_perfcount_int(void);
extern int gic_get_c0_fdc_int(void);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 04579d9fbce4..2aed04396210 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -74,6 +74,8 @@ enum irq_domain_bus_token {
DOMAIN_BUS_PCI_MSI,
DOMAIN_BUS_PLATFORM_MSI,
DOMAIN_BUS_NEXUS,
+ DOMAIN_BUS_IPI,
+ DOMAIN_BUS_FSL_MC_MSI,
};
/**
@@ -172,6 +174,12 @@ enum {
/* Core calls alloc/free recursive through the domain hierarchy. */
IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
+ /* Irq domain is an IPI domain with virq per cpu */
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
+
+ /* Irq domain is an IPI domain with single virq */
+ IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
+
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
@@ -206,6 +214,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
+extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
+ irq_hw_number_t hwirq, int node);
static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
{
@@ -335,6 +345,11 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
+/* IPI functions */
+unsigned int irq_reserve_ipi(struct irq_domain *domain,
+ const struct cpumask *dest);
+void irq_destroy_ipi(unsigned int irq);
+
/* V2 interfaces to support hierarchy IRQ domains. */
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq);
@@ -400,6 +415,22 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
}
+
+static inline bool irq_domain_is_ipi(struct irq_domain *domain)
+{
+ return domain->flags &
+ (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
+}
+
+static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_IPI_PER_CPU;
+}
+
+static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
+}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
static inline void irq_domain_activate_irq(struct irq_data *data) { }
static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -413,6 +444,21 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
return false;
}
+
+static inline bool irq_domain_is_ipi(struct irq_domain *domain)
+{
+ return false;
+}
+
+static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
+{
+ return false;
+}
+
+static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
+{
+ return false;
+}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#else /* CONFIG_IRQ_DOMAIN */
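A sketch, under the assumption of an IPI-capable irq domain provided by the irqchip, of reserving an IPI with the new API and kicking a single CPU; the names below are made up.

/* Illustrative only. */
static unsigned int example_ipi_virq;

static int example_setup_ipi(struct irq_domain *ipi_domain)
{
	example_ipi_virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);
	return example_ipi_virq ? 0 : -ENODEV;
}

static void example_kick(unsigned int cpu)
{
	/* Routed through the chip's ipi_send_single()/ipi_send_mask() op. */
	ipi_send_single(example_ipi_virq, cpu);
}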
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
index 2a8b1659bf35..548d55395488 100644
--- a/include/linux/iscsi_boot_sysfs.h
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -23,6 +23,7 @@ enum iscsi_boot_eth_properties_enum {
ISCSI_BOOT_ETH_INDEX,
ISCSI_BOOT_ETH_FLAGS,
ISCSI_BOOT_ETH_IP_ADDR,
+ ISCSI_BOOT_ETH_PREFIX_LEN,
ISCSI_BOOT_ETH_SUBNET_MASK,
ISCSI_BOOT_ETH_ORIGIN,
ISCSI_BOOT_ETH_GATEWAY,
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
index 1e9a0f2a8626..df97c8444f5d 100644
--- a/include/linux/isdn.h
+++ b/include/linux/isdn.h
@@ -319,6 +319,7 @@ typedef struct modem_info {
int online; /* 1 = B-Channel is up, drop data */
/* 2 = B-Channel is up, deliver d.*/
int dialing; /* Dial in progress or ATA */
+ int closing;
int rcvsched; /* Receive needs schedule */
int isdn_driver; /* Index to isdn-driver */
int isdn_channel; /* Index to isdn-channel */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 65407f6c9120..fd1083c46c61 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -200,7 +200,7 @@ typedef struct journal_block_tag_s
__be32 t_blocknr_high; /* most-significant high 32bits. */
} journal_block_tag_t;
-/* Tail of descriptor block, for checksumming */
+/* Tail of descriptor or revoke block, for checksumming */
struct jbd2_journal_block_tail {
__be32 t_checksum; /* crc32c(uuid+descr_block) */
};
@@ -215,11 +215,6 @@ typedef struct jbd2_journal_revoke_header_s
__be32 r_count; /* Count of bytes used in the block */
} jbd2_journal_revoke_header_t;
-/* Tail of revoke block, for checksumming */
-struct jbd2_journal_revoke_tail {
- __be32 r_checksum; /* crc32c(uuid+revoke_block) */
-};
-
/* Definitions for the journal tag flags word: */
#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */
#define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */
@@ -1137,7 +1132,8 @@ static inline void jbd2_unfile_log_bh(struct buffer_head *bh)
}
/* Log buffer allocation */
-struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
+struct buffer_head *jbd2_journal_get_descriptor_buffer(transaction_t *, int);
+void jbd2_descriptor_block_csum_set(journal_t *, struct buffer_head *);
int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
unsigned long *block);
@@ -1327,10 +1323,8 @@ extern int jbd2_journal_init_revoke_caches(void);
extern void jbd2_journal_destroy_revoke(journal_t *);
extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void jbd2_journal_write_revoke_records(journal_t *journal,
- transaction_t *transaction,
- struct list_head *log_bufs,
- int write_op);
+extern void jbd2_journal_write_revoke_records(transaction_t *transaction,
+ struct list_head *log_bufs);
/* Recovery revoke support */
extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 4b9f85c963d0..737371b56044 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H
+#include <linux/sched.h>
#include <linux/types.h>
struct kmem_cache;
@@ -13,7 +14,6 @@ struct vm_struct;
#include <asm/kasan.h>
#include <asm/pgtable.h>
-#include <linux/sched.h>
extern unsigned char kasan_zero_page[PAGE_SIZE];
extern pte_t kasan_zero_pte[PTRS_PER_PTE];
@@ -43,22 +43,33 @@ static inline void kasan_disable_current(void)
void kasan_unpoison_shadow(const void *address, size_t size);
+void kasan_unpoison_task_stack(struct task_struct *task);
+
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
+void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+ unsigned long *flags);
+
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);
-void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
void kasan_kfree_large(const void *ptr);
void kasan_kfree(void *ptr);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
-void kasan_krealloc(const void *object, size_t new_size);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
+ gfp_t flags);
+void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
-void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
void kasan_slab_free(struct kmem_cache *s, void *object);
+struct kasan_cache {
+ int alloc_meta_offset;
+ int free_meta_offset;
+};
+
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
@@ -66,26 +77,34 @@ void kasan_free_shadow(const struct vm_struct *vm);
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+
static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+static inline void kasan_cache_create(struct kmem_cache *cache,
+ size_t *size,
+ unsigned long *flags) {}
+
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
void *object) {}
-static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
+static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
static inline void kasan_kfree_large(const void *ptr) {}
static inline void kasan_kfree(void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
- size_t size) {}
-static inline void kasan_krealloc(const void *object, size_t new_size) {}
+ size_t size, gfp_t flags) {}
+static inline void kasan_krealloc(const void *object, size_t new_size,
+ gfp_t flags) {}
-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags) {}
static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
new file mode 100644
index 000000000000..2883ac98c280
--- /dev/null
+++ b/include/linux/kcov.h
@@ -0,0 +1,29 @@
+#ifndef _LINUX_KCOV_H
+#define _LINUX_KCOV_H
+
+#include <uapi/linux/kcov.h>
+
+struct task_struct;
+
+#ifdef CONFIG_KCOV
+
+void kcov_task_init(struct task_struct *t);
+void kcov_task_exit(struct task_struct *t);
+
+enum kcov_mode {
+ /* Coverage collection is not enabled yet. */
+ KCOV_MODE_DISABLED = 0,
+ /*
+ * Tracing coverage collection mode.
+ * Covered PCs are collected in a per-task buffer.
+ */
+ KCOV_MODE_TRACE = 1,
+};
+
+#else
+
+static inline void kcov_task_init(struct task_struct *t) {}
+static inline void kcov_task_exit(struct task_struct *t) {}
+
+#endif /* CONFIG_KCOV */
+#endif /* _LINUX_KCOV_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f31638c6e873..2f7775e229b0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -64,7 +64,7 @@
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_UP_ULL(ll,d) \
({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
@@ -255,7 +255,7 @@ extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...)
__noreturn __cold;
-void nmi_panic_self_stop(struct pt_regs *);
+void nmi_panic(struct pt_regs *regs, const char *msg);
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
@@ -357,6 +357,7 @@ int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
+int __must_check kstrtobool(const char *s, bool *res);
int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
@@ -368,6 +369,7 @@ int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigne
int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
+int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
@@ -455,25 +457,6 @@ extern atomic_t panic_cpu;
#define PANIC_CPU_INVALID -1
/*
- * A variant of panic() called from NMI context. We return if we've already
- * panicked on this CPU. If another CPU already panicked, loop in
- * nmi_panic_self_stop() which can provide architecture dependent code such
- * as saving register state for crash dump.
- */
-#define nmi_panic(regs, fmt, ...) \
-do { \
- int old_cpu, cpu; \
- \
- cpu = raw_smp_processor_id(); \
- old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); \
- \
- if (old_cpu == PANIC_CPU_INVALID) \
- panic(fmt, ##__VA_ARGS__); \
- else if (old_cpu != cpu) \
- nmi_panic_self_stop(regs); \
-} while (0)
-
-/*
* Only to be used by arch init code. If the user over-wrote the default
* CONFIG_PANIC_TIMEOUT, honor it.
*/
@@ -635,7 +618,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
@@ -679,7 +662,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
*/
#define trace_puts(str) ({ \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(str) ? str : NULL; \
\
@@ -701,7 +684,7 @@ extern void trace_dump_stack(int skip);
#define ftrace_vprintk(fmt, vargs) \
do { \
if (__builtin_constant_p(fmt)) { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
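A tiny example of the new kstrtobool() helper, assuming a sysfs-style string argument; the surrounding function is invented.

/* Illustrative only: parse "y"/"n"/"1"/"0" style input. */
static int example_parse_enable(const char *arg, bool *enable)
{
	int ret = kstrtobool(arg, enable);

	if (ret)
		return ret;	/* -EINVAL on unrecognised input */
	return 0;
}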
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index af51df35d749..c06c44242f39 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -267,8 +267,9 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
size_t kernfs_path_len(struct kernfs_node *kn);
-char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
- size_t buflen);
+int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
+ char *buf, size_t buflen);
+char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
@@ -283,6 +284,8 @@ struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry);
struct kernfs_root *kernfs_root_from_sb(struct super_block *sb);
struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn);
+struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
+ struct super_block *sb);
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv);
void kernfs_destroy_root(struct kernfs_root *root);
@@ -338,8 +341,8 @@ static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
static inline size_t kernfs_path_len(struct kernfs_node *kn)
{ return 0; }
-static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
- size_t buflen)
+static inline char *kernfs_path(struct kernfs_node *kn, char *buf,
+ size_t buflen)
{ return NULL; }
static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { }
diff --git a/include/linux/key.h b/include/linux/key.h
index 7321ab8ef949..5f5b1129dc92 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -219,6 +219,7 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
+#define KEY_ALLOC_BUILT_IN 0x0008 /* Key is built into kernel */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 473b43678ad1..41eb6fdf87a8 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -401,7 +401,7 @@ __kfifo_int_must_check_helper( \
((typeof(__tmp->type))__kfifo->data) : \
(__tmp->buf) \
)[__kfifo->in & __tmp->kfifo.mask] = \
- (typeof(*__tmp->type))__val; \
+ *(typeof(__tmp->type))&__val; \
smp_wmb(); \
__kfifo->in++; \
} \
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 861f690aa791..5276fe0916fc 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -25,6 +25,7 @@
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
+#include <linux/swait.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -218,7 +219,7 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
unsigned char fpu_counter;
- wait_queue_head_t wq;
+ struct swait_queue_head wq;
struct pid *pid;
int sigset_active;
sigset_t sigset;
@@ -782,7 +783,7 @@ static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
}
#endif
-static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
return vcpu->arch.wqp;
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index e23121f9d82a..59ccab297ae0 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -37,6 +37,9 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
void clear_all_latency_tracing(struct task_struct *p);
+extern int sysctl_latencytop(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
#else
static inline void
diff --git a/include/linux/leds.h b/include/linux/leds.h
index bc1476fda96e..f203a8f89d30 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -39,6 +39,7 @@ struct led_classdev {
/* Lower 16 bits reflect status */
#define LED_SUSPENDED (1 << 0)
+#define LED_UNREGISTERING (1 << 1)
/* Upper 16 bits reflect control information */
#define LED_CORE_SUSPENDRESUME (1 << 16)
#define LED_BLINK_ONESHOT (1 << 17)
@@ -48,9 +49,12 @@ struct led_classdev {
#define LED_BLINK_DISABLE (1 << 21)
#define LED_SYSFS_DISABLE (1 << 22)
#define LED_DEV_CAP_FLASH (1 << 23)
+#define LED_HW_PLUGGABLE (1 << 24)
- /* Set LED brightness level */
- /* Must not sleep, use a workqueue if needed */
+ /* Set LED brightness level
+ * Must not sleep. Use brightness_set_blocking for drivers
+ * that can sleep while setting brightness.
+ */
void (*brightness_set)(struct led_classdev *led_cdev,
enum led_brightness brightness);
/*
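A hedged sketch of the distinction the reworded comment draws: a driver whose set path may sleep (for example over I2C) implements brightness_set_blocking instead of brightness_set. The names are placeholders.

/* Illustrative only. */
static int example_led_set_blocking(struct led_classdev *cdev,
				    enum led_brightness b)
{
	/* A bus transfer that may sleep is fine here, but would not be
	 * allowed in brightness_set(). */
	return 0;
}

static struct led_classdev example_led = {
	.name			  = "example:green",
	.brightness_set_blocking  = example_led_set_blocking,
};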
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bec2abbd7ab2..2c4ebef79d0c 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -720,7 +720,7 @@ struct ata_device {
union {
u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
- };
+ } ____cacheline_aligned;
/* DEVSLP Timing Variables from Identify Device Data Log */
u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index bed40dff0e86..833867b9ddc2 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -26,9 +26,8 @@ enum {
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
- ND_CMD_MAX_ELEM = 4,
+ ND_CMD_MAX_ELEM = 5,
ND_CMD_MAX_ENVELOPE = 16,
- ND_CMD_ARS_STATUS_MAX = SZ_4K,
ND_MAX_MAPPINGS = 32,
/* region flag indicating to direct-map persistent memory by default */
@@ -49,7 +48,7 @@ struct nvdimm;
struct nvdimm_bus_descriptor;
typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
- unsigned int buf_len);
+ unsigned int buf_len, int *cmd_rc);
struct nd_namespace_label;
struct nvdimm_drvdata;
@@ -72,6 +71,9 @@ struct nvdimm_bus_descriptor {
unsigned long dsm_mask;
char *provider_name;
ndctl_fn ndctl;
+ int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
+ int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd);
};
struct nd_cmd_desc {
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2190419bdf0a..cdcb2ccbefa8 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -92,9 +92,9 @@ enum {
NVM_ADDRMODE_CHANNEL = 1,
/* Plane programming mode for LUN */
- NVM_PLANE_SINGLE = 0,
- NVM_PLANE_DOUBLE = 1,
- NVM_PLANE_QUAD = 2,
+ NVM_PLANE_SINGLE = 1,
+ NVM_PLANE_DOUBLE = 2,
+ NVM_PLANE_QUAD = 4,
/* Status codes */
NVM_RSP_SUCCESS = 0x0,
@@ -242,6 +242,7 @@ struct nvm_rq {
uint16_t nr_pages;
uint16_t flags;
+ u64 ppa_status; /* ppa media status */
int error;
};
@@ -341,11 +342,12 @@ struct nvm_dev {
int lps_per_blk;
int *lptbl;
- unsigned long total_pages;
unsigned long total_blocks;
+ unsigned long total_secs;
int nr_luns;
unsigned max_pages_per_blk;
+ unsigned long *lun_map;
void *ppalist_pool;
struct nvm_id identity;
@@ -355,6 +357,7 @@ struct nvm_dev {
char name[DISK_NAME_LEN];
struct mutex mlock;
+ spinlock_t lock;
};
static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@@ -465,8 +468,13 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
+typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
+typedef void (nvmm_release_lun)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
+typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
+typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
+
struct nvmm_type {
const char *name;
unsigned int version[3];
@@ -488,9 +496,15 @@ struct nvmm_type {
/* Configuration management */
nvmm_get_lun_fn *get_lun;
+ nvmm_reserve_lun *reserve_lun;
+ nvmm_release_lun *release_lun;
/* Statistics */
nvmm_lun_info_print_fn *lun_info_print;
+
+ nvmm_get_area_fn *get_area;
+ nvmm_put_area_fn *put_area;
+
struct list_head list;
};
diff --git a/include/linux/list.h b/include/linux/list.h
index 30cf4200ab40..5356f4d661a7 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -113,17 +113,6 @@ extern void __list_del_entry(struct list_head *entry);
extern void list_del(struct list_head *entry);
#endif
-#ifdef CONFIG_DEBUG_LIST
-/*
- * See devm_memremap_pages() which wants DEBUG_LIST=y to assert if one
- * of the pages it allocates is ever passed to list_add()
- */
-extern void list_force_poison(struct list_head *entry);
-#else
-/* fallback to the less strict LIST_POISON* definitions */
-#define list_force_poison list_del
-#endif
-
/**
* list_replace - replace old entry by new one
* @old : the element to be replaced
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ee7229a6c06a..cb483305e1f5 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -48,7 +48,7 @@ static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)
-static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
+static inline bool hlist_bl_unhashed(const struct hlist_bl_node *h)
{
return !h->pprev;
}
@@ -68,7 +68,7 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h,
h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
}
-static inline int hlist_bl_empty(const struct hlist_bl_head *h)
+static inline bool hlist_bl_empty(const struct hlist_bl_head *h)
{
return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK);
}
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index a8828652f794..bd830d590465 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -134,6 +134,15 @@ int klp_unregister_patch(struct klp_patch *);
int klp_enable_patch(struct klp_patch *);
int klp_disable_patch(struct klp_patch *);
+/* Called from the module loader during module coming/going states */
+int klp_module_coming(struct module *mod);
+void klp_module_going(struct module *mod);
+
+#else /* !CONFIG_LIVEPATCH */
+
+static inline int klp_module_coming(struct module *mod) { return 0; }
+static inline void klp_module_going(struct module *mod) { }
+
#endif /* CONFIG_LIVEPATCH */
#endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4dca42fd32f5..d026b190c530 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -261,7 +261,6 @@ struct held_lock {
/*
* Initialization, self-test and debugging-output methods:
*/
-extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
@@ -392,7 +391,6 @@ static inline void lockdep_on(void)
# define lockdep_set_current_reclaim_state(g) do { } while (0)
# define lockdep_clear_current_reclaim_state() do { } while (0)
# define lockdep_trace_alloc(g) do { } while (0)
-# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 71969de4058c..cdee11cbcdf1 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -541,25 +541,24 @@
* @inode points to the inode to use as a reference.
* The current task must be the one that nominated @inode.
* Return 0 if successful.
- * @kernel_fw_from_file:
- * Load firmware from userspace (not called for built-in firmware).
- * @file contains the file structure pointing to the file containing
- * the firmware to load. This argument will be NULL if the firmware
- * was loaded via the uevent-triggered blob-based interface exposed
- * by CONFIG_FW_LOADER_USER_HELPER.
- * @buf pointer to buffer containing firmware contents.
- * @size length of the firmware contents.
- * Return 0 if permission is granted.
* @kernel_module_request:
* Ability to trigger the kernel to automatically upcall to userspace for
* userspace to load a kernel module with the given name.
* @kmod_name name of the module requested by the kernel
* Return 0 if successful.
- * @kernel_module_from_file:
- * Load a kernel module from userspace.
- * @file contains the file structure pointing to the file containing
- * the kernel module to load. If the module is being loaded from a blob,
- * this argument will be NULL.
+ * @kernel_read_file:
+ * Read a file specified by userspace.
+ * @file contains the file structure pointing to the file being read
+ * by the kernel.
+ * @id kernel read file identifier
+ * Return 0 if permission is granted.
+ * @kernel_post_read_file:
+ * Inspect the contents of a file the kernel has read on behalf of userspace.
+ * @file contains the file structure pointing to the file being read
+ * by the kernel.
+ * @buf pointer to buffer containing the file contents.
+ * @size length of the file contents.
+ * @id kernel read file identifier
* Return 0 if permission is granted.
* @task_fix_setuid:
* Update the module's state after setting one or more of the user
@@ -1454,9 +1453,11 @@ union security_list_options {
void (*cred_transfer)(struct cred *new, const struct cred *old);
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
- int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size);
int (*kernel_module_request)(char *kmod_name);
int (*kernel_module_from_file)(struct file *file);
+ int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
+ int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
+ enum kernel_read_file_id id);
int (*task_fix_setuid)(struct cred *new, const struct cred *old,
int flags);
int (*task_setpgid)(struct task_struct *p, pid_t pgid);
@@ -1715,9 +1716,9 @@ struct security_hook_heads {
struct list_head cred_transfer;
struct list_head kernel_act_as;
struct list_head kernel_create_files_as;
- struct list_head kernel_fw_from_file;
+ struct list_head kernel_read_file;
+ struct list_head kernel_post_read_file;
struct list_head kernel_module_request;
- struct list_head kernel_module_from_file;
struct list_head task_fix_setuid;
struct list_head task_setpgid;
struct list_head task_getpgid;
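A minimal sketch of a hypothetical LSM wiring up the two new hooks documented above; the module name and policy are invented and registration details are elided.

/* Illustrative only. */
static int example_kernel_read_file(struct file *file,
				    enum kernel_read_file_id id)
{
	/* Decide up front whether this read (module, firmware, ...) is
	 * permitted; a non-zero return denies it. */
	return 0;
}

static int example_kernel_post_read_file(struct file *file, char *buf,
					 loff_t size,
					 enum kernel_read_file_id id)
{
	/* The contents are now in @buf; an IMA-like policy could hash and
	 * appraise them here. */
	return 0;
}

static struct security_hook_list example_hooks[] = {
	LSM_HOOK_INIT(kernel_read_file, example_kernel_read_file),
	LSM_HOOK_INIT(kernel_post_read_file, example_kernel_post_read_file),
};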
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 246a3529ecf6..ac02c54520e9 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -596,7 +596,7 @@ static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
}
extern void set_channel_address(struct mISDNchannel *, u_int, u_int);
-extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *);
+extern void mISDN_clock_update(struct mISDNclock *, int, ktime_t *);
extern unsigned short mISDN_clock_get(void);
extern const char *mISDNDevName4ch(struct mISDNchannel *);
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 6a392e7a723a..86c9a8b480c5 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -1,55 +1,52 @@
-/*
- File: linux/mbcache.h
+#ifndef _LINUX_MBCACHE_H
+#define _LINUX_MBCACHE_H
- (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
-*/
-struct mb_cache_entry {
- struct list_head e_lru_list;
- struct mb_cache *e_cache;
- unsigned short e_used;
- unsigned short e_queued;
- atomic_t e_refcnt;
- struct block_device *e_bdev;
- sector_t e_block;
- struct hlist_bl_node e_block_list;
- struct {
- struct hlist_bl_node o_list;
- unsigned int o_key;
- } e_index;
- struct hlist_bl_head *e_block_hash_p;
- struct hlist_bl_head *e_index_hash_p;
-};
+#include <linux/hash.h>
+#include <linux/list_bl.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/fs.h>
-struct mb_cache {
- struct list_head c_cache_list;
- const char *c_name;
- atomic_t c_entry_count;
- int c_max_entries;
- int c_bucket_bits;
- struct kmem_cache *c_entry_cache;
- struct hlist_bl_head *c_block_hash;
- struct hlist_bl_head *c_index_hash;
-};
+struct mb_cache;
-/* Functions on caches */
+struct mb_cache_entry {
+ /* List of entries in cache - protected by cache->c_list_lock */
+ struct list_head e_list;
+ /* Hash table list - protected by hash chain bitlock */
+ struct hlist_bl_node e_hash_list;
+ atomic_t e_refcnt;
+ /* Key in hash - stable during lifetime of the entry */
+ u32 e_key;
+ u32 e_referenced:1;
+ u32 e_reusable:1;
+ /* Block number of hashed block - stable during lifetime of the entry */
+ sector_t e_block;
+};
-struct mb_cache *mb_cache_create(const char *, int);
-void mb_cache_shrink(struct block_device *);
-void mb_cache_destroy(struct mb_cache *);
+struct mb_cache *mb_cache_create(int bucket_bits);
+void mb_cache_destroy(struct mb_cache *cache);
-/* Functions on cache entries */
+int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
+ sector_t block, bool reusable);
+void __mb_cache_entry_free(struct mb_cache_entry *entry);
+static inline int mb_cache_entry_put(struct mb_cache *cache,
+ struct mb_cache_entry *entry)
+{
+ if (!atomic_dec_and_test(&entry->e_refcnt))
+ return 0;
+ __mb_cache_entry_free(entry);
+ return 1;
+}
-struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t);
-int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
- sector_t, unsigned int);
-void mb_cache_entry_release(struct mb_cache_entry *);
-void mb_cache_entry_free(struct mb_cache_entry *);
-struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *,
- struct block_device *,
- sector_t);
+void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
+ sector_t block);
+struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
+ sector_t block);
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
- struct block_device *,
- unsigned int);
-struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *,
- struct block_device *,
- unsigned int);
+ u32 key);
+struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
+ struct mb_cache_entry *entry);
+void mb_cache_entry_touch(struct mb_cache *cache,
+ struct mb_cache_entry *entry);
+
+#endif /* _LINUX_MBCACHE_H */
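A short usage sketch of the rewritten mbcache API, assuming an ext2/ext4-style caller that keys xattr blocks by hash; the names and key derivation are placeholders.

/* Illustrative only. */
static struct mb_cache *example_cache;

static int example_init(void)
{
	example_cache = mb_cache_create(10);	/* 2^10 hash buckets */
	return example_cache ? 0 : -ENOMEM;
}

static void example_remember(u32 hash, sector_t block)
{
	/* Duplicate keys are allowed; "reusable" marks the block as
	 * shareable by other inodes. */
	mb_cache_entry_create(example_cache, GFP_NOFS, hash, block, true);
}

static sector_t example_lookup(u32 hash)
{
	struct mb_cache_entry *ce;
	sector_t block = 0;

	ce = mb_cache_entry_find_first(example_cache, hash);
	if (ce) {
		block = ce->e_block;
		mb_cache_entry_put(example_cache, ce);
	}
	return block;
}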
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 1f7bc630d225..ea34a867caa0 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -69,6 +69,9 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo
int mvebu_mbus_save_cpu_target(u32 *store_addr);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
+int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr);
+int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
+ u8 *attr);
int mvebu_mbus_add_window_remap_by_id(unsigned int target,
unsigned int attribute,
phys_addr_t base, size_t size,
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 792c8981e633..1191d79aa495 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -28,6 +28,7 @@
#include <linux/eventfd.h>
#include <linux/mmzone.h>
#include <linux/writeback.h>
+#include <linux/page-flags.h>
struct mem_cgroup;
struct page;
@@ -51,7 +52,10 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
/* default hierarchy stats */
- MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
+ MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS,
+ MEMCG_SLAB_RECLAIMABLE,
+ MEMCG_SLAB_UNRECLAIMABLE,
+ MEMCG_SOCK,
MEMCG_NR_STAT,
};
@@ -89,6 +93,10 @@ enum mem_cgroup_events_target {
};
#ifdef CONFIG_MEMCG
+
+#define MEM_CGROUP_ID_SHIFT 16
+#define MEM_CGROUP_ID_MAX USHRT_MAX
+
struct mem_cgroup_stat_cpu {
long count[MEMCG_NR_STAT];
unsigned long events[MEMCG_NR_EVENTS];
@@ -265,6 +273,11 @@ struct mem_cgroup {
extern struct mem_cgroup *root_mem_cgroup;
+static inline bool mem_cgroup_disabled(void)
+{
+ return !cgroup_subsys_enabled(memory_cgrp_subsys);
+}
+
/**
* mem_cgroup_events - count memory events against a cgroup
* @memcg: the memory cgroup
@@ -291,7 +304,7 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
-void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage);
+void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
@@ -312,6 +325,28 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_disabled())
+ return 0;
+
+ return memcg->css.id;
+}
+
+/**
+ * mem_cgroup_from_id - look up a memcg from an id
+ * @id: the id to look up
+ *
+ * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
+ */
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ struct cgroup_subsys_state *css;
+
+ css = css_from_id(id, &memory_cgrp_subsys);
+ return mem_cgroup_from_css(css);
+}
+
/**
* parent_mem_cgroup - find the accounting parent of a memcg
* @memcg: memcg whose parent to find
@@ -353,11 +388,6 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);
-static inline bool mem_cgroup_disabled(void)
-{
- return !cgroup_subsys_enabled(memory_cgrp_subsys);
-}
-
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
if (mem_cgroup_disabled())
@@ -373,6 +403,9 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int nr_pages);
+unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+ int nid, unsigned int lru_mask);
+
static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
@@ -429,36 +462,43 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
+void lock_page_memcg(struct page *page);
+void unlock_page_memcg(struct page *page);
/**
* mem_cgroup_update_page_stat - update page state statistics
- * @memcg: memcg to account against
+ * @page: the page
* @idx: page state item to account
* @val: number of pages (positive or negative)
*
- * See mem_cgroup_begin_page_stat() for locking requirements.
+ * The @page must be locked or the caller must use lock_page_memcg()
+ * to prevent double accounting when the page is concurrently being
+ * moved to another memcg:
+ *
+ * lock_page(page) or lock_page_memcg(page)
+ * if (TestClearPageState(page))
+ * mem_cgroup_update_page_stat(page, state, -1);
+ * unlock_page(page) or unlock_page_memcg(page)
*/
-static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_update_page_stat(struct page *page,
enum mem_cgroup_stat_index idx, int val)
{
- VM_BUG_ON(!rcu_read_lock_held());
+ VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
- if (memcg)
- this_cpu_add(memcg->stat->count[idx], val);
+ if (page->mem_cgroup)
+ this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
- mem_cgroup_update_page_stat(memcg, idx, 1);
+ mem_cgroup_update_page_stat(page, idx, 1);
}
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
- mem_cgroup_update_page_stat(memcg, idx, -1);
+ mem_cgroup_update_page_stat(page, idx, -1);
}
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -496,8 +536,17 @@ void mem_cgroup_split_huge_fixup(struct page *head);
#endif
#else /* CONFIG_MEMCG */
+
+#define MEM_CGROUP_ID_SHIFT 0
+#define MEM_CGROUP_ID_MAX 0
+
struct mem_cgroup;
+static inline bool mem_cgroup_disabled(void)
+{
+ return true;
+}
+
static inline void mem_cgroup_events(struct mem_cgroup *memcg,
enum mem_cgroup_events_index idx,
unsigned int nr)
@@ -539,7 +588,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}
-static inline void mem_cgroup_replace_page(struct page *old, struct page *new)
+static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}
@@ -580,9 +629,16 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
{
}
-static inline bool mem_cgroup_disabled(void)
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
- return true;
+ return 0;
+}
+
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ WARN_ON_ONCE(id);
+ /* XXX: This should always return root_mem_cgroup */
+ return NULL;
}
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
@@ -608,17 +664,23 @@ mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
{
}
+static inline unsigned long
+mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+ int nid, unsigned int lru_mask)
+{
+ return 0;
+}
+
static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
+static inline void lock_page_memcg(struct page *page)
{
- return NULL;
}
-static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
+static inline void unlock_page_memcg(struct page *page)
{
}
@@ -644,12 +706,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
return false;
}
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
}
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
}
@@ -743,11 +805,6 @@ static inline bool memcg_kmem_enabled(void)
return static_branch_unlikely(&memcg_kmem_enabled_key);
}
-static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
-{
- return memcg->kmem_state == KMEM_ONLINE;
-}
-
/*
* In general, we'll do everything in our power to not incur in any overhead
* for non-memcg users for the kmem functions. Not even a function call, if we
@@ -765,7 +822,7 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);
/*
- * helper for acessing a memcg's index. It will be used as an index in the
+ * helper for accessing a memcg's index. It will be used as an index in the
* child cache array in kmem_cache, and also to derive its name. This function
* will return -1 when this is not a kmem-limited memcg.
*/
@@ -834,6 +891,20 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
if (memcg_kmem_enabled())
__memcg_kmem_put_cache(cachep);
}
+
+/**
+ * memcg_kmem_update_page_stat - update kmem page state statistics
+ * @page: the page
+ * @idx: page state item to account
+ * @val: number of pages (positive or negative)
+ */
+static inline void memcg_kmem_update_page_stat(struct page *page,
+ enum mem_cgroup_stat_index idx, int val)
+{
+ if (memcg_kmem_enabled() && page->mem_cgroup)
+ this_cpu_add(page->mem_cgroup->stat->count[idx], val);
+}
+
#else
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
@@ -843,11 +914,6 @@ static inline bool memcg_kmem_enabled(void)
return false;
}
-static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
-{
- return false;
-}
-
static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
return 0;
@@ -879,6 +945,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}
+
+static inline void memcg_kmem_update_page_stat(struct page *page,
+ enum mem_cgroup_stat_index idx, int val)
+{
+}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
#endif /* _LINUX_MEMCONTROL_H */
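For context, a minimal sketch of the caller pattern the new lock_page_memcg()/unlock_page_memcg() kerneldoc above describes. It is not lifted from any particular call site; the dirty-flag/statistic pairing (TestClearPageDirty with MEM_CGROUP_STAT_DIRTY) is only an illustration, and hypothetical_clear_dirty() is a made-up name.

static void hypothetical_clear_dirty(struct page *page)
{
	/* pin page->mem_cgroup so a concurrent memcg move cannot double-account */
	lock_page_memcg(page);
	if (TestClearPageDirty(page))
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
	unlock_page_memcg(page);
}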
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 8b8d8d12348e..093607f90b91 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -109,6 +109,9 @@ extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
extern int register_new_memory(int, struct mem_section *);
+extern int memory_block_change_state(struct memory_block *mem,
+ unsigned long to_state,
+ unsigned long from_state_req);
#ifdef CONFIG_MEMORY_HOTREMOVE
extern int unregister_memory_section(struct mem_section *);
#endif
@@ -137,17 +140,6 @@ extern struct memory_block *find_memory_block(struct mem_section *);
#endif
/*
- * 'struct memory_accessor' is a generic interface to provide
- * in-kernel access to persistent memory such as i2c or SPI EEPROMs
- */
-struct memory_accessor {
- ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset,
- size_t count);
- ssize_t (*write)(struct memory_accessor *, const char *buf,
- off_t offset, size_t count);
-};
-
-/*
* Kernel text modification mutex, used for code patching. Users of this lock
* can sleep.
*/
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 43405992d027..adbef586e696 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -99,6 +99,8 @@ extern void __online_page_free(struct page *page);
extern int try_online_node(int nid);
+extern bool memhp_auto_online;
+
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
extern int arch_remove_memory(u64 start, u64 size);
@@ -196,6 +198,9 @@ void put_online_mems(void);
void mem_hotplug_begin(void);
void mem_hotplug_done(void);
+extern void set_zone_contiguous(struct zone *zone);
+extern void clear_zone_contiguous(struct zone *zone);
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
* Stub functions for when hotplug is off
@@ -267,7 +272,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
-extern int add_memory_resource(int nid, struct resource *resource);
+extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
bool for_device);
extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
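A hedged sketch of how a hotplug provider might use the widened add_memory_resource() together with the new memhp_auto_online policy flag; hypothetical_hotadd() is a stand-in name and the resource is assumed to be already requested by the caller.

static int hypothetical_hotadd(int nid, struct resource *res)
{
	/* online the new block immediately when the global auto-online policy asks for it */
	return add_memory_resource(nid, res, memhp_auto_online);
}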
diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h
index 38452ce1e892..34cc85864be5 100644
--- a/include/linux/mfd/as3711.h
+++ b/include/linux/mfd/as3711.h
@@ -51,7 +51,8 @@
#define AS3711_ASIC_ID_1 0x90
#define AS3711_ASIC_ID_2 0x91
-#define AS3711_MAX_REGS 0x92
+#define AS3711_MAX_REG AS3711_ASIC_ID_2
+#define AS3711_NUM_REGS (AS3711_MAX_REG + 1)
/* Regulators */
enum {
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index b24c771cebd5..d82e7d51372b 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -18,6 +18,7 @@ enum {
AXP202_ID,
AXP209_ID,
AXP221_ID,
+ AXP223_ID,
AXP288_ID,
NR_AXP20X_VARIANTS,
};
@@ -396,7 +397,7 @@ enum axp288_irqs {
struct axp20x_dev {
struct device *dev;
- struct i2c_client *i2c_client;
+ int irq;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
long variant;
@@ -462,4 +463,35 @@ static inline int axp20x_read_variable_width(struct regmap *regmap,
return result;
}
+/**
+ * axp20x_match_device(): Setup axp20x variant related fields
+ *
+ * @axp20x: axp20x device to setup (.dev field must be set)
+ * @dev: device associated with this axp20x device
+ *
+ * This lets the axp20x core configure the mfd cells and register maps
+ * for later use.
+ */
+int axp20x_match_device(struct axp20x_dev *axp20x);
+
+/**
+ * axp20x_device_probe(): Probe a configured axp20x device
+ *
+ * @axp20x: axp20x device to probe (must be configured)
+ *
+ * This function lets the axp20x core register the axp20x mfd devices
+ * and irqchip. The axp20x device passed in must be fully configured
+ * with axp20x_match_device, its irq set, and regmap created.
+ */
+int axp20x_device_probe(struct axp20x_dev *axp20x);
+
+/**
+ * axp20x_device_remove(): Remove an axp20x device
+ *
+ * @axp20x: axp20x device to remove
+ *
+ * This tells the axp20x core to remove the associated mfd devices
+ */
+int axp20x_device_remove(struct axp20x_dev *axp20x);
+
#endif /* __LINUX_MFD_AXP20X_H */
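The two kerneldoc comments above imply a split between bus glue and the axp20x core: set .dev, match the variant, create the regmap, set the irq, then probe. A sketch of what an I2C glue probe could look like under that flow; hypothetical_axp20x_i2c_probe() and hypothetical_regmap_config are stand-ins, not the config actually selected by axp20x_match_device().

static const struct regmap_config hypothetical_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int hypothetical_axp20x_i2c_probe(struct i2c_client *client,
					 const struct i2c_device_id *id)
{
	struct axp20x_dev *axp20x;
	int ret;

	axp20x = devm_kzalloc(&client->dev, sizeof(*axp20x), GFP_KERNEL);
	if (!axp20x)
		return -ENOMEM;

	axp20x->dev = &client->dev;
	axp20x->irq = client->irq;

	/* picks the variant and the mfd cells / irq chip descriptions */
	ret = axp20x_match_device(axp20x);
	if (ret)
		return ret;

	axp20x->regmap = devm_regmap_init_i2c(client, &hypothetical_regmap_config);
	if (IS_ERR(axp20x->regmap))
		return PTR_ERR(axp20x->regmap);

	i2c_set_clientdata(client, axp20x);

	return axp20x_device_probe(axp20x);
}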
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 494682ce4bf3..a677c2bd485c 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -245,7 +245,7 @@ int cros_ec_remove(struct cros_ec_device *ec_dev);
int cros_ec_register(struct cros_ec_device *ec_dev);
/**
- * cros_ec_register - Query the protocol version supported by the ChromeOS EC
+ * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
*
* @ec_dev: Device to register
* @return 0 if ok, -ve on error
diff --git a/include/linux/mfd/imx25-tsadc.h b/include/linux/mfd/imx25-tsadc.h
new file mode 100644
index 000000000000..7fe4b8c3baac
--- /dev/null
+++ b/include/linux/mfd/imx25-tsadc.h
@@ -0,0 +1,140 @@
+#ifndef _LINUX_INCLUDE_MFD_IMX25_TSADC_H_
+#define _LINUX_INCLUDE_MFD_IMX25_TSADC_H_
+
+struct regmap;
+struct clk;
+
+struct mx25_tsadc {
+ struct regmap *regs;
+ struct irq_domain *domain;
+ struct clk *clk;
+};
+
+#define MX25_TSC_TGCR 0x00
+#define MX25_TSC_TGSR 0x04
+#define MX25_TSC_TICR 0x08
+
+/* The same register layout for TC and GC queue */
+#define MX25_ADCQ_FIFO 0x00
+#define MX25_ADCQ_CR 0x04
+#define MX25_ADCQ_SR 0x08
+#define MX25_ADCQ_MR 0x0c
+#define MX25_ADCQ_ITEM_7_0 0x20
+#define MX25_ADCQ_ITEM_15_8 0x24
+#define MX25_ADCQ_CFG(n) (0x40 + ((n) * 0x4))
+
+#define MX25_ADCQ_MR_MASK 0xffffffff
+
+/* TGCR */
+#define MX25_TGCR_PDBTIME(x) ((x) << 25)
+#define MX25_TGCR_PDBTIME_MASK GENMASK(31, 25)
+#define MX25_TGCR_PDBEN BIT(24)
+#define MX25_TGCR_PDEN BIT(23)
+#define MX25_TGCR_ADCCLKCFG(x) ((x) << 16)
+#define MX25_TGCR_GET_ADCCLK(x) (((x) >> 16) & 0x1f)
+#define MX25_TGCR_INTREFEN BIT(10)
+#define MX25_TGCR_POWERMODE_MASK GENMASK(9, 8)
+#define MX25_TGCR_POWERMODE_SAVE (1 << 8)
+#define MX25_TGCR_POWERMODE_ON (2 << 8)
+#define MX25_TGCR_STLC BIT(5)
+#define MX25_TGCR_SLPC BIT(4)
+#define MX25_TGCR_FUNC_RST BIT(2)
+#define MX25_TGCR_TSC_RST BIT(1)
+#define MX25_TGCR_CLK_EN BIT(0)
+
+/* TGSR */
+#define MX25_TGSR_SLP_INT BIT(2)
+#define MX25_TGSR_GCQ_INT BIT(1)
+#define MX25_TGSR_TCQ_INT BIT(0)
+
+/* ADCQ_ITEM_* */
+#define _MX25_ADCQ_ITEM(item, x) ((x) << ((item) * 4))
+#define MX25_ADCQ_ITEM(item, x) ((item) >= 8 ? \
+ _MX25_ADCQ_ITEM((item) - 8, (x)) : _MX25_ADCQ_ITEM((item), (x)))
+
+/* ADCQ_FIFO (TCQFIFO and GCQFIFO) */
+#define MX25_ADCQ_FIFO_DATA(x) (((x) >> 4) & 0xfff)
+#define MX25_ADCQ_FIFO_ID(x) ((x) & 0xf)
+
+/* ADCQ_CR (TCQR and GCQR) */
+#define MX25_ADCQ_CR_PDCFG_LEVEL BIT(19)
+#define MX25_ADCQ_CR_PDMSK BIT(18)
+#define MX25_ADCQ_CR_FRST BIT(17)
+#define MX25_ADCQ_CR_QRST BIT(16)
+#define MX25_ADCQ_CR_RWAIT_MASK GENMASK(15, 12)
+#define MX25_ADCQ_CR_RWAIT(x) ((x) << 12)
+#define MX25_ADCQ_CR_WMRK_MASK GENMASK(11, 8)
+#define MX25_ADCQ_CR_WMRK(x) ((x) << 8)
+#define MX25_ADCQ_CR_LITEMID_MASK (0xf << 4)
+#define MX25_ADCQ_CR_LITEMID(x) ((x) << 4)
+#define MX25_ADCQ_CR_RPT BIT(3)
+#define MX25_ADCQ_CR_FQS BIT(2)
+#define MX25_ADCQ_CR_QSM_MASK GENMASK(1, 0)
+#define MX25_ADCQ_CR_QSM_PD 0x1
+#define MX25_ADCQ_CR_QSM_FQS 0x2
+#define MX25_ADCQ_CR_QSM_FQS_PD 0x3
+
+/* ADCQ_SR (TCQSR and GCQSR) */
+#define MX25_ADCQ_SR_FDRY BIT(15)
+#define MX25_ADCQ_SR_FULL BIT(14)
+#define MX25_ADCQ_SR_EMPT BIT(13)
+#define MX25_ADCQ_SR_FDN(x) (((x) >> 8) & 0x1f)
+#define MX25_ADCQ_SR_FRR BIT(6)
+#define MX25_ADCQ_SR_FUR BIT(5)
+#define MX25_ADCQ_SR_FOR BIT(4)
+#define MX25_ADCQ_SR_EOQ BIT(1)
+#define MX25_ADCQ_SR_PD BIT(0)
+
+/* ADCQ_MR (TCQMR and GCQMR) */
+#define MX25_ADCQ_MR_FDRY_DMA BIT(31)
+#define MX25_ADCQ_MR_FER_DMA BIT(22)
+#define MX25_ADCQ_MR_FUR_DMA BIT(21)
+#define MX25_ADCQ_MR_FOR_DMA BIT(20)
+#define MX25_ADCQ_MR_EOQ_DMA BIT(17)
+#define MX25_ADCQ_MR_PD_DMA BIT(16)
+#define MX25_ADCQ_MR_FDRY_IRQ BIT(15)
+#define MX25_ADCQ_MR_FER_IRQ BIT(6)
+#define MX25_ADCQ_MR_FUR_IRQ BIT(5)
+#define MX25_ADCQ_MR_FOR_IRQ BIT(4)
+#define MX25_ADCQ_MR_EOQ_IRQ BIT(1)
+#define MX25_ADCQ_MR_PD_IRQ BIT(0)
+
+/* ADCQ_CFG (TICR, TCC0-7,GCC0-7) */
+#define MX25_ADCQ_CFG_SETTLING_TIME(x) ((x) << 24)
+#define MX25_ADCQ_CFG_IGS (1 << 20)
+#define MX25_ADCQ_CFG_NOS_MASK GENMASK(19, 16)
+#define MX25_ADCQ_CFG_NOS(x) (((x) - 1) << 16)
+#define MX25_ADCQ_CFG_WIPER (1 << 15)
+#define MX25_ADCQ_CFG_YNLR (1 << 14)
+#define MX25_ADCQ_CFG_YPLL_HIGH (0 << 12)
+#define MX25_ADCQ_CFG_YPLL_OFF (1 << 12)
+#define MX25_ADCQ_CFG_YPLL_LOW (3 << 12)
+#define MX25_ADCQ_CFG_XNUR_HIGH (0 << 10)
+#define MX25_ADCQ_CFG_XNUR_OFF (1 << 10)
+#define MX25_ADCQ_CFG_XNUR_LOW (3 << 10)
+#define MX25_ADCQ_CFG_XPUL_HIGH (0 << 9)
+#define MX25_ADCQ_CFG_XPUL_OFF (1 << 9)
+#define MX25_ADCQ_CFG_REFP(sel) ((sel) << 7)
+#define MX25_ADCQ_CFG_REFP_YP MX25_ADCQ_CFG_REFP(0)
+#define MX25_ADCQ_CFG_REFP_XP MX25_ADCQ_CFG_REFP(1)
+#define MX25_ADCQ_CFG_REFP_EXT MX25_ADCQ_CFG_REFP(2)
+#define MX25_ADCQ_CFG_REFP_INT MX25_ADCQ_CFG_REFP(3)
+#define MX25_ADCQ_CFG_REFP_MASK GENMASK(8, 7)
+#define MX25_ADCQ_CFG_IN(sel) ((sel) << 4)
+#define MX25_ADCQ_CFG_IN_XP MX25_ADCQ_CFG_IN(0)
+#define MX25_ADCQ_CFG_IN_YP MX25_ADCQ_CFG_IN(1)
+#define MX25_ADCQ_CFG_IN_XN MX25_ADCQ_CFG_IN(2)
+#define MX25_ADCQ_CFG_IN_YN MX25_ADCQ_CFG_IN(3)
+#define MX25_ADCQ_CFG_IN_WIPER MX25_ADCQ_CFG_IN(4)
+#define MX25_ADCQ_CFG_IN_AUX0 MX25_ADCQ_CFG_IN(5)
+#define MX25_ADCQ_CFG_IN_AUX1 MX25_ADCQ_CFG_IN(6)
+#define MX25_ADCQ_CFG_IN_AUX2 MX25_ADCQ_CFG_IN(7)
+#define MX25_ADCQ_CFG_REFN(sel) ((sel) << 2)
+#define MX25_ADCQ_CFG_REFN_XN MX25_ADCQ_CFG_REFN(0)
+#define MX25_ADCQ_CFG_REFN_YN MX25_ADCQ_CFG_REFN(1)
+#define MX25_ADCQ_CFG_REFN_NGND MX25_ADCQ_CFG_REFN(2)
+#define MX25_ADCQ_CFG_REFN_NGND2 MX25_ADCQ_CFG_REFN(3)
+#define MX25_ADCQ_CFG_REFN_MASK GENMASK(3, 2)
+#define MX25_ADCQ_CFG_PENIACK (1 << 1)
+
+#endif /* _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ */
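A sketch of composing one conversion-queue item configuration from the ADCQ_CFG field helpers above. The regmap parameter is assumed to cover a single ADC queue register block, the channel and reference selections are arbitrary, and hypothetical_cfg_item() is a made-up name.

static int hypothetical_cfg_item(struct regmap *queue_regs)
{
	/* 4 samples per item, internal positive reference, AUX0 input */
	u32 cfg = MX25_ADCQ_CFG_SETTLING_TIME(25) |
		  MX25_ADCQ_CFG_NOS(4) |
		  MX25_ADCQ_CFG_REFP_INT |
		  MX25_ADCQ_CFG_IN_AUX0 |
		  MX25_ADCQ_CFG_REFN_NGND;

	return regmap_write(queue_regs, MX25_ADCQ_CFG(0), cfg);
}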
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index f5043490d67c..643dae777b43 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -437,14 +437,11 @@ enum max77686_irq {
struct max77686_dev {
struct device *dev;
struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */
- struct i2c_client *rtc; /* slave addr 0x0c */
unsigned long type;
struct regmap *regmap; /* regmap for mfd */
- struct regmap *rtc_regmap; /* regmap for rtc */
struct regmap_irq_chip_data *irq_data;
- struct regmap_irq_chip_data *rtc_irq_data;
int irq;
struct mutex irqlock;
diff --git a/include/linux/mfd/mt6323/core.h b/include/linux/mfd/mt6323/core.h
new file mode 100644
index 000000000000..06d0ec3b1f8f
--- /dev/null
+++ b/include/linux/mfd/mt6323/core.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFD_MT6323_CORE_H__
+#define __MFD_MT6323_CORE_H__
+
+enum MT6323_IRQ_STATUS_numbers {
+ MT6323_IRQ_STATUS_SPKL_AB = 0,
+ MT6323_IRQ_STATUS_SPKL,
+ MT6323_IRQ_STATUS_BAT_L,
+ MT6323_IRQ_STATUS_BAT_H,
+ MT6323_IRQ_STATUS_WATCHDOG,
+ MT6323_IRQ_STATUS_PWRKEY,
+ MT6323_IRQ_STATUS_THR_L,
+ MT6323_IRQ_STATUS_THR_H,
+ MT6323_IRQ_STATUS_VBATON_UNDET,
+ MT6323_IRQ_STATUS_BVALID_DET,
+ MT6323_IRQ_STATUS_CHRDET,
+ MT6323_IRQ_STATUS_OV,
+ MT6323_IRQ_STATUS_LDO = 16,
+ MT6323_IRQ_STATUS_FCHRKEY,
+ MT6323_IRQ_STATUS_ACCDET,
+ MT6323_IRQ_STATUS_AUDIO,
+ MT6323_IRQ_STATUS_RTC,
+ MT6323_IRQ_STATUS_VPROC,
+ MT6323_IRQ_STATUS_VSYS,
+ MT6323_IRQ_STATUS_VPA,
+ MT6323_IRQ_STATUS_NR,
+};
+
+#endif /* __MFD_MT6323_CORE_H__ */
diff --git a/include/linux/mfd/mt6323/registers.h b/include/linux/mfd/mt6323/registers.h
new file mode 100644
index 000000000000..160f3c0e2589
--- /dev/null
+++ b/include/linux/mfd/mt6323/registers.h
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFD_MT6323_REGISTERS_H__
+#define __MFD_MT6323_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6323_CHR_CON0 0x0000
+#define MT6323_CHR_CON1 0x0002
+#define MT6323_CHR_CON2 0x0004
+#define MT6323_CHR_CON3 0x0006
+#define MT6323_CHR_CON4 0x0008
+#define MT6323_CHR_CON5 0x000A
+#define MT6323_CHR_CON6 0x000C
+#define MT6323_CHR_CON7 0x000E
+#define MT6323_CHR_CON8 0x0010
+#define MT6323_CHR_CON9 0x0012
+#define MT6323_CHR_CON10 0x0014
+#define MT6323_CHR_CON11 0x0016
+#define MT6323_CHR_CON12 0x0018
+#define MT6323_CHR_CON13 0x001A
+#define MT6323_CHR_CON14 0x001C
+#define MT6323_CHR_CON15 0x001E
+#define MT6323_CHR_CON16 0x0020
+#define MT6323_CHR_CON17 0x0022
+#define MT6323_CHR_CON18 0x0024
+#define MT6323_CHR_CON19 0x0026
+#define MT6323_CHR_CON20 0x0028
+#define MT6323_CHR_CON21 0x002A
+#define MT6323_CHR_CON22 0x002C
+#define MT6323_CHR_CON23 0x002E
+#define MT6323_CHR_CON24 0x0030
+#define MT6323_CHR_CON25 0x0032
+#define MT6323_CHR_CON26 0x0034
+#define MT6323_CHR_CON27 0x0036
+#define MT6323_CHR_CON28 0x0038
+#define MT6323_CHR_CON29 0x003A
+#define MT6323_STRUP_CON0 0x003C
+#define MT6323_STRUP_CON2 0x003E
+#define MT6323_STRUP_CON3 0x0040
+#define MT6323_STRUP_CON4 0x0042
+#define MT6323_STRUP_CON5 0x0044
+#define MT6323_STRUP_CON6 0x0046
+#define MT6323_STRUP_CON7 0x0048
+#define MT6323_STRUP_CON8 0x004A
+#define MT6323_STRUP_CON9 0x004C
+#define MT6323_STRUP_CON10 0x004E
+#define MT6323_STRUP_CON11 0x0050
+#define MT6323_SPK_CON0 0x0052
+#define MT6323_SPK_CON1 0x0054
+#define MT6323_SPK_CON2 0x0056
+#define MT6323_SPK_CON6 0x005E
+#define MT6323_SPK_CON7 0x0060
+#define MT6323_SPK_CON8 0x0062
+#define MT6323_SPK_CON9 0x0064
+#define MT6323_SPK_CON10 0x0066
+#define MT6323_SPK_CON11 0x0068
+#define MT6323_SPK_CON12 0x006A
+#define MT6323_CID 0x0100
+#define MT6323_TOP_CKPDN0 0x0102
+#define MT6323_TOP_CKPDN0_SET 0x0104
+#define MT6323_TOP_CKPDN0_CLR 0x0106
+#define MT6323_TOP_CKPDN1 0x0108
+#define MT6323_TOP_CKPDN1_SET 0x010A
+#define MT6323_TOP_CKPDN1_CLR 0x010C
+#define MT6323_TOP_CKPDN2 0x010E
+#define MT6323_TOP_CKPDN2_SET 0x0110
+#define MT6323_TOP_CKPDN2_CLR 0x0112
+#define MT6323_TOP_RST_CON 0x0114
+#define MT6323_TOP_RST_CON_SET 0x0116
+#define MT6323_TOP_RST_CON_CLR 0x0118
+#define MT6323_TOP_RST_MISC 0x011A
+#define MT6323_TOP_RST_MISC_SET 0x011C
+#define MT6323_TOP_RST_MISC_CLR 0x011E
+#define MT6323_TOP_CKCON0 0x0120
+#define MT6323_TOP_CKCON0_SET 0x0122
+#define MT6323_TOP_CKCON0_CLR 0x0124
+#define MT6323_TOP_CKCON1 0x0126
+#define MT6323_TOP_CKCON1_SET 0x0128
+#define MT6323_TOP_CKCON1_CLR 0x012A
+#define MT6323_TOP_CKTST0 0x012C
+#define MT6323_TOP_CKTST1 0x012E
+#define MT6323_TOP_CKTST2 0x0130
+#define MT6323_TEST_OUT 0x0132
+#define MT6323_TEST_CON0 0x0134
+#define MT6323_TEST_CON1 0x0136
+#define MT6323_EN_STATUS0 0x0138
+#define MT6323_EN_STATUS1 0x013A
+#define MT6323_OCSTATUS0 0x013C
+#define MT6323_OCSTATUS1 0x013E
+#define MT6323_PGSTATUS 0x0140
+#define MT6323_CHRSTATUS 0x0142
+#define MT6323_TDSEL_CON 0x0144
+#define MT6323_RDSEL_CON 0x0146
+#define MT6323_SMT_CON0 0x0148
+#define MT6323_SMT_CON1 0x014A
+#define MT6323_SMT_CON2 0x014C
+#define MT6323_SMT_CON3 0x014E
+#define MT6323_SMT_CON4 0x0150
+#define MT6323_DRV_CON0 0x0152
+#define MT6323_DRV_CON1 0x0154
+#define MT6323_DRV_CON2 0x0156
+#define MT6323_DRV_CON3 0x0158
+#define MT6323_DRV_CON4 0x015A
+#define MT6323_SIMLS1_CON 0x015C
+#define MT6323_SIMLS2_CON 0x015E
+#define MT6323_INT_CON0 0x0160
+#define MT6323_INT_CON0_SET 0x0162
+#define MT6323_INT_CON0_CLR 0x0164
+#define MT6323_INT_CON1 0x0166
+#define MT6323_INT_CON1_SET 0x0168
+#define MT6323_INT_CON1_CLR 0x016A
+#define MT6323_INT_MISC_CON 0x016C
+#define MT6323_INT_MISC_CON_SET 0x016E
+#define MT6323_INT_MISC_CON_CLR 0x0170
+#define MT6323_INT_STATUS0 0x0172
+#define MT6323_INT_STATUS1 0x0174
+#define MT6323_OC_GEAR_0 0x0176
+#define MT6323_OC_GEAR_1 0x0178
+#define MT6323_OC_GEAR_2 0x017A
+#define MT6323_OC_CTL_VPROC 0x017C
+#define MT6323_OC_CTL_VSYS 0x017E
+#define MT6323_OC_CTL_VPA 0x0180
+#define MT6323_FQMTR_CON0 0x0182
+#define MT6323_FQMTR_CON1 0x0184
+#define MT6323_FQMTR_CON2 0x0186
+#define MT6323_RG_SPI_CON 0x0188
+#define MT6323_DEW_DIO_EN 0x018A
+#define MT6323_DEW_READ_TEST 0x018C
+#define MT6323_DEW_WRITE_TEST 0x018E
+#define MT6323_DEW_CRC_SWRST 0x0190
+#define MT6323_DEW_CRC_EN 0x0192
+#define MT6323_DEW_CRC_VAL 0x0194
+#define MT6323_DEW_DBG_MON_SEL 0x0196
+#define MT6323_DEW_CIPHER_KEY_SEL 0x0198
+#define MT6323_DEW_CIPHER_IV_SEL 0x019A
+#define MT6323_DEW_CIPHER_EN 0x019C
+#define MT6323_DEW_CIPHER_RDY 0x019E
+#define MT6323_DEW_CIPHER_MODE 0x01A0
+#define MT6323_DEW_CIPHER_SWRST 0x01A2
+#define MT6323_DEW_RDDMY_NO 0x01A4
+#define MT6323_DEW_RDATA_DLY_SEL 0x01A6
+#define MT6323_BUCK_CON0 0x0200
+#define MT6323_BUCK_CON1 0x0202
+#define MT6323_BUCK_CON2 0x0204
+#define MT6323_BUCK_CON3 0x0206
+#define MT6323_BUCK_CON4 0x0208
+#define MT6323_BUCK_CON5 0x020A
+#define MT6323_VPROC_CON0 0x020C
+#define MT6323_VPROC_CON1 0x020E
+#define MT6323_VPROC_CON2 0x0210
+#define MT6323_VPROC_CON3 0x0212
+#define MT6323_VPROC_CON4 0x0214
+#define MT6323_VPROC_CON5 0x0216
+#define MT6323_VPROC_CON7 0x021A
+#define MT6323_VPROC_CON8 0x021C
+#define MT6323_VPROC_CON9 0x021E
+#define MT6323_VPROC_CON10 0x0220
+#define MT6323_VPROC_CON11 0x0222
+#define MT6323_VPROC_CON12 0x0224
+#define MT6323_VPROC_CON13 0x0226
+#define MT6323_VPROC_CON14 0x0228
+#define MT6323_VPROC_CON15 0x022A
+#define MT6323_VPROC_CON18 0x0230
+#define MT6323_VSYS_CON0 0x0232
+#define MT6323_VSYS_CON1 0x0234
+#define MT6323_VSYS_CON2 0x0236
+#define MT6323_VSYS_CON3 0x0238
+#define MT6323_VSYS_CON4 0x023A
+#define MT6323_VSYS_CON5 0x023C
+#define MT6323_VSYS_CON7 0x0240
+#define MT6323_VSYS_CON8 0x0242
+#define MT6323_VSYS_CON9 0x0244
+#define MT6323_VSYS_CON10 0x0246
+#define MT6323_VSYS_CON11 0x0248
+#define MT6323_VSYS_CON12 0x024A
+#define MT6323_VSYS_CON13 0x024C
+#define MT6323_VSYS_CON14 0x024E
+#define MT6323_VSYS_CON15 0x0250
+#define MT6323_VSYS_CON18 0x0256
+#define MT6323_VPA_CON0 0x0300
+#define MT6323_VPA_CON1 0x0302
+#define MT6323_VPA_CON2 0x0304
+#define MT6323_VPA_CON3 0x0306
+#define MT6323_VPA_CON4 0x0308
+#define MT6323_VPA_CON5 0x030A
+#define MT6323_VPA_CON7 0x030E
+#define MT6323_VPA_CON8 0x0310
+#define MT6323_VPA_CON9 0x0312
+#define MT6323_VPA_CON10 0x0314
+#define MT6323_VPA_CON11 0x0316
+#define MT6323_VPA_CON12 0x0318
+#define MT6323_VPA_CON14 0x031C
+#define MT6323_VPA_CON16 0x0320
+#define MT6323_VPA_CON17 0x0322
+#define MT6323_VPA_CON18 0x0324
+#define MT6323_VPA_CON19 0x0326
+#define MT6323_VPA_CON20 0x0328
+#define MT6323_BUCK_K_CON0 0x032A
+#define MT6323_BUCK_K_CON1 0x032C
+#define MT6323_BUCK_K_CON2 0x032E
+#define MT6323_ISINK0_CON0 0x0330
+#define MT6323_ISINK0_CON1 0x0332
+#define MT6323_ISINK0_CON2 0x0334
+#define MT6323_ISINK0_CON3 0x0336
+#define MT6323_ISINK1_CON0 0x0338
+#define MT6323_ISINK1_CON1 0x033A
+#define MT6323_ISINK1_CON2 0x033C
+#define MT6323_ISINK1_CON3 0x033E
+#define MT6323_ISINK2_CON0 0x0340
+#define MT6323_ISINK2_CON1 0x0342
+#define MT6323_ISINK2_CON2 0x0344
+#define MT6323_ISINK2_CON3 0x0346
+#define MT6323_ISINK3_CON0 0x0348
+#define MT6323_ISINK3_CON1 0x034A
+#define MT6323_ISINK3_CON2 0x034C
+#define MT6323_ISINK3_CON3 0x034E
+#define MT6323_ISINK_ANA0 0x0350
+#define MT6323_ISINK_ANA1 0x0352
+#define MT6323_ISINK_PHASE_DLY 0x0354
+#define MT6323_ISINK_EN_CTRL 0x0356
+#define MT6323_ANALDO_CON0 0x0400
+#define MT6323_ANALDO_CON1 0x0402
+#define MT6323_ANALDO_CON2 0x0404
+#define MT6323_ANALDO_CON3 0x0406
+#define MT6323_ANALDO_CON4 0x0408
+#define MT6323_ANALDO_CON5 0x040A
+#define MT6323_ANALDO_CON6 0x040C
+#define MT6323_ANALDO_CON7 0x040E
+#define MT6323_ANALDO_CON8 0x0410
+#define MT6323_ANALDO_CON10 0x0412
+#define MT6323_ANALDO_CON15 0x0414
+#define MT6323_ANALDO_CON16 0x0416
+#define MT6323_ANALDO_CON17 0x0418
+#define MT6323_ANALDO_CON18 0x041A
+#define MT6323_ANALDO_CON19 0x041C
+#define MT6323_ANALDO_CON20 0x041E
+#define MT6323_ANALDO_CON21 0x0420
+#define MT6323_DIGLDO_CON0 0x0500
+#define MT6323_DIGLDO_CON2 0x0502
+#define MT6323_DIGLDO_CON3 0x0504
+#define MT6323_DIGLDO_CON5 0x0506
+#define MT6323_DIGLDO_CON6 0x0508
+#define MT6323_DIGLDO_CON7 0x050A
+#define MT6323_DIGLDO_CON8 0x050C
+#define MT6323_DIGLDO_CON9 0x050E
+#define MT6323_DIGLDO_CON10 0x0510
+#define MT6323_DIGLDO_CON11 0x0512
+#define MT6323_DIGLDO_CON12 0x0514
+#define MT6323_DIGLDO_CON13 0x0516
+#define MT6323_DIGLDO_CON14 0x0518
+#define MT6323_DIGLDO_CON15 0x051A
+#define MT6323_DIGLDO_CON16 0x051C
+#define MT6323_DIGLDO_CON17 0x051E
+#define MT6323_DIGLDO_CON18 0x0520
+#define MT6323_DIGLDO_CON19 0x0522
+#define MT6323_DIGLDO_CON20 0x0524
+#define MT6323_DIGLDO_CON21 0x0526
+#define MT6323_DIGLDO_CON23 0x0528
+#define MT6323_DIGLDO_CON24 0x052A
+#define MT6323_DIGLDO_CON26 0x052C
+#define MT6323_DIGLDO_CON27 0x052E
+#define MT6323_DIGLDO_CON28 0x0530
+#define MT6323_DIGLDO_CON29 0x0532
+#define MT6323_DIGLDO_CON30 0x0534
+#define MT6323_DIGLDO_CON31 0x0536
+#define MT6323_DIGLDO_CON32 0x0538
+#define MT6323_DIGLDO_CON33 0x053A
+#define MT6323_DIGLDO_CON34 0x053C
+#define MT6323_DIGLDO_CON35 0x053E
+#define MT6323_DIGLDO_CON36 0x0540
+#define MT6323_DIGLDO_CON39 0x0542
+#define MT6323_DIGLDO_CON40 0x0544
+#define MT6323_DIGLDO_CON41 0x0546
+#define MT6323_DIGLDO_CON42 0x0548
+#define MT6323_DIGLDO_CON43 0x054A
+#define MT6323_DIGLDO_CON44 0x054C
+#define MT6323_DIGLDO_CON45 0x054E
+#define MT6323_DIGLDO_CON46 0x0550
+#define MT6323_DIGLDO_CON47 0x0552
+#define MT6323_DIGLDO_CON48 0x0554
+#define MT6323_DIGLDO_CON49 0x0556
+#define MT6323_DIGLDO_CON50 0x0558
+#define MT6323_DIGLDO_CON51 0x055A
+#define MT6323_DIGLDO_CON52 0x055C
+#define MT6323_DIGLDO_CON53 0x055E
+#define MT6323_DIGLDO_CON54 0x0560
+#define MT6323_EFUSE_CON0 0x0600
+#define MT6323_EFUSE_CON1 0x0602
+#define MT6323_EFUSE_CON2 0x0604
+#define MT6323_EFUSE_CON3 0x0606
+#define MT6323_EFUSE_CON4 0x0608
+#define MT6323_EFUSE_CON5 0x060A
+#define MT6323_EFUSE_CON6 0x060C
+#define MT6323_EFUSE_VAL_0_15 0x060E
+#define MT6323_EFUSE_VAL_16_31 0x0610
+#define MT6323_EFUSE_VAL_32_47 0x0612
+#define MT6323_EFUSE_VAL_48_63 0x0614
+#define MT6323_EFUSE_VAL_64_79 0x0616
+#define MT6323_EFUSE_VAL_80_95 0x0618
+#define MT6323_EFUSE_VAL_96_111 0x061A
+#define MT6323_EFUSE_VAL_112_127 0x061C
+#define MT6323_EFUSE_VAL_128_143 0x061E
+#define MT6323_EFUSE_VAL_144_159 0x0620
+#define MT6323_EFUSE_VAL_160_175 0x0622
+#define MT6323_EFUSE_VAL_176_191 0x0624
+#define MT6323_EFUSE_DOUT_0_15 0x0626
+#define MT6323_EFUSE_DOUT_16_31 0x0628
+#define MT6323_EFUSE_DOUT_32_47 0x062A
+#define MT6323_EFUSE_DOUT_48_63 0x062C
+#define MT6323_EFUSE_DOUT_64_79 0x062E
+#define MT6323_EFUSE_DOUT_80_95 0x0630
+#define MT6323_EFUSE_DOUT_96_111 0x0632
+#define MT6323_EFUSE_DOUT_112_127 0x0634
+#define MT6323_EFUSE_DOUT_128_143 0x0636
+#define MT6323_EFUSE_DOUT_144_159 0x0638
+#define MT6323_EFUSE_DOUT_160_175 0x063A
+#define MT6323_EFUSE_DOUT_176_191 0x063C
+#define MT6323_EFUSE_CON7 0x063E
+#define MT6323_EFUSE_CON8 0x0640
+#define MT6323_EFUSE_CON9 0x0642
+#define MT6323_RTC_MIX_CON0 0x0644
+#define MT6323_RTC_MIX_CON1 0x0646
+#define MT6323_AUDTOP_CON0 0x0700
+#define MT6323_AUDTOP_CON1 0x0702
+#define MT6323_AUDTOP_CON2 0x0704
+#define MT6323_AUDTOP_CON3 0x0706
+#define MT6323_AUDTOP_CON4 0x0708
+#define MT6323_AUDTOP_CON5 0x070A
+#define MT6323_AUDTOP_CON6 0x070C
+#define MT6323_AUDTOP_CON7 0x070E
+#define MT6323_AUDTOP_CON8 0x0710
+#define MT6323_AUDTOP_CON9 0x0712
+#define MT6323_AUXADC_ADC0 0x0714
+#define MT6323_AUXADC_ADC1 0x0716
+#define MT6323_AUXADC_ADC2 0x0718
+#define MT6323_AUXADC_ADC3 0x071A
+#define MT6323_AUXADC_ADC4 0x071C
+#define MT6323_AUXADC_ADC5 0x071E
+#define MT6323_AUXADC_ADC6 0x0720
+#define MT6323_AUXADC_ADC7 0x0722
+#define MT6323_AUXADC_ADC8 0x0724
+#define MT6323_AUXADC_ADC9 0x0726
+#define MT6323_AUXADC_ADC10 0x0728
+#define MT6323_AUXADC_ADC11 0x072A
+#define MT6323_AUXADC_ADC12 0x072C
+#define MT6323_AUXADC_ADC13 0x072E
+#define MT6323_AUXADC_ADC14 0x0730
+#define MT6323_AUXADC_ADC15 0x0732
+#define MT6323_AUXADC_ADC16 0x0734
+#define MT6323_AUXADC_ADC17 0x0736
+#define MT6323_AUXADC_ADC18 0x0738
+#define MT6323_AUXADC_ADC19 0x073A
+#define MT6323_AUXADC_ADC20 0x073C
+#define MT6323_AUXADC_RSV1 0x073E
+#define MT6323_AUXADC_RSV2 0x0740
+#define MT6323_AUXADC_CON0 0x0742
+#define MT6323_AUXADC_CON1 0x0744
+#define MT6323_AUXADC_CON2 0x0746
+#define MT6323_AUXADC_CON3 0x0748
+#define MT6323_AUXADC_CON4 0x074A
+#define MT6323_AUXADC_CON5 0x074C
+#define MT6323_AUXADC_CON6 0x074E
+#define MT6323_AUXADC_CON7 0x0750
+#define MT6323_AUXADC_CON8 0x0752
+#define MT6323_AUXADC_CON9 0x0754
+#define MT6323_AUXADC_CON10 0x0756
+#define MT6323_AUXADC_CON11 0x0758
+#define MT6323_AUXADC_CON12 0x075A
+#define MT6323_AUXADC_CON13 0x075C
+#define MT6323_AUXADC_CON14 0x075E
+#define MT6323_AUXADC_CON15 0x0760
+#define MT6323_AUXADC_CON16 0x0762
+#define MT6323_AUXADC_CON17 0x0764
+#define MT6323_AUXADC_CON18 0x0766
+#define MT6323_AUXADC_CON19 0x0768
+#define MT6323_AUXADC_CON20 0x076A
+#define MT6323_AUXADC_CON21 0x076C
+#define MT6323_AUXADC_CON22 0x076E
+#define MT6323_AUXADC_CON23 0x0770
+#define MT6323_AUXADC_CON24 0x0772
+#define MT6323_AUXADC_CON25 0x0774
+#define MT6323_AUXADC_CON26 0x0776
+#define MT6323_AUXADC_CON27 0x0778
+#define MT6323_ACCDET_CON0 0x077A
+#define MT6323_ACCDET_CON1 0x077C
+#define MT6323_ACCDET_CON2 0x077E
+#define MT6323_ACCDET_CON3 0x0780
+#define MT6323_ACCDET_CON4 0x0782
+#define MT6323_ACCDET_CON5 0x0784
+#define MT6323_ACCDET_CON6 0x0786
+#define MT6323_ACCDET_CON7 0x0788
+#define MT6323_ACCDET_CON8 0x078A
+#define MT6323_ACCDET_CON9 0x078C
+#define MT6323_ACCDET_CON10 0x078E
+#define MT6323_ACCDET_CON11 0x0790
+#define MT6323_ACCDET_CON12 0x0792
+#define MT6323_ACCDET_CON13 0x0794
+#define MT6323_ACCDET_CON14 0x0796
+#define MT6323_ACCDET_CON15 0x0798
+#define MT6323_ACCDET_CON16 0x079A
+
+#endif /* __MFD_MT6323_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index 45b8e8aa1fbf..d678f526e498 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -60,6 +60,8 @@ struct mt6397_chip {
u16 wake_mask[2];
u16 irq_masks_cur[2];
u16 irq_masks_cache[2];
+ u16 int_con[2];
+ u16 int_status[2];
};
#endif /* __MFD_MT6397_CORE_H__ */
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index c800dbc42079..5c9a1d44c125 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -580,7 +580,9 @@ struct palmas_usb {
int vbus_irq;
int gpio_id_irq;
+ int gpio_vbus_irq;
struct gpio_desc *id_gpiod;
+ struct gpio_desc *vbus_gpiod;
unsigned long sw_debounce_jiffies;
struct delayed_work wq_detectid;
@@ -589,6 +591,7 @@ struct palmas_usb {
bool enable_vbus_detection;
bool enable_id_detection;
bool enable_gpio_id_detection;
+ bool enable_gpio_vbus_detection;
};
#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h
index fd413ccab915..8d0a392e0a7f 100644
--- a/include/linux/mfd/rc5t583.h
+++ b/include/linux/mfd/rc5t583.h
@@ -28,8 +28,6 @@
#include <linux/types.h>
#include <linux/regmap.h>
-#define RC5T583_MAX_REGS 0xF8
-
/* Maximum number of main interrupts */
#define MAX_MAIN_INTERRUPT 5
#define RC5T583_MAX_GPEDGE_REG 2
@@ -169,6 +167,9 @@
#define RC5T583_RTC_AY_MONTH 0xF3
#define RC5T583_RTC_AY_YEAR 0xF4
+#define RC5T583_MAX_REG 0xF7
+#define RC5T583_NUM_REGS (RC5T583_MAX_REG + 1)
+
/* RICOH_RC5T583 IRQ definitions */
enum {
RC5T583_IRQ_ONKEY,
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 75e543b78f53..1088149be0c9 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -29,24 +29,24 @@ extern struct regmap *syscon_regmap_lookup_by_phandle(
#else
static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENOTSUPP);
}
static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENOTSUPP);
}
static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENOTSUPP);
}
static inline struct regmap *syscon_regmap_lookup_by_phandle(
struct device_node *np,
const char *property)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENOTSUPP);
}
#endif
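A sketch of the caller pattern the stubs above assume: consumers check IS_ERR() and propagate the error code, whether it is -ENOTSUPP from the !CONFIG_MFD_SYSCON stubs or a real lookup failure. The phandle property name and hypothetical_get_grf() are made up for the example.

static int hypothetical_get_grf(struct device *dev, struct regmap **grf)
{
	*grf = syscon_regmap_lookup_by_phandle(dev->of_node, "example,syscon");
	if (IS_ERR(*grf))
		return PTR_ERR(*grf);

	return 0;
}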
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index 558a485d03ab..238c8db953eb 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -422,6 +422,7 @@
#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26)
#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26)
#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26)
+#define IMX6SX_GPR5_PCIE_BTNRST_RESET BIT(19)
#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4)
#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4)
#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4)
@@ -435,6 +436,10 @@
#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
+#define IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30)
+#define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0)
+#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0)
+
/* For imx6ul iomux gpr register field define */
#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 24b86d538e88..05d58ee5e6a7 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -65,6 +65,10 @@
* Some controllers can support SDIO IRQ signalling.
*/
#define TMIO_MMC_SDIO_IRQ (1 << 2)
+
+/* Some controllers don't need to wait 10ms for clock changes */
+#define TMIO_MMC_FAST_CLK_CHG (1 << 3)
+
/*
* Some controllers require waiting for the SD bus to become
* idle before writing to some registers.
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
new file mode 100644
index 000000000000..a228ae4c88d9
--- /dev/null
+++ b/include/linux/mfd/tps65086.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65912 driver
+ */
+
+#ifndef __LINUX_MFD_TPS65086_H
+#define __LINUX_MFD_TPS65086_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* List of registers for TPS65086 */
+#define TPS65086_DEVICEID 0x01
+#define TPS65086_IRQ 0x02
+#define TPS65086_IRQ_MASK 0x03
+#define TPS65086_PMICSTAT 0x04
+#define TPS65086_SHUTDNSRC 0x05
+#define TPS65086_BUCK1CTRL 0x20
+#define TPS65086_BUCK2CTRL 0x21
+#define TPS65086_BUCK3DECAY 0x22
+#define TPS65086_BUCK3VID 0x23
+#define TPS65086_BUCK3SLPCTRL 0x24
+#define TPS65086_BUCK4CTRL 0x25
+#define TPS65086_BUCK5CTRL 0x26
+#define TPS65086_BUCK6CTRL 0x27
+#define TPS65086_LDOA2CTRL 0x28
+#define TPS65086_LDOA3CTRL 0x29
+#define TPS65086_DISCHCTRL1 0x40
+#define TPS65086_DISCHCTRL2 0x41
+#define TPS65086_DISCHCTRL3 0x42
+#define TPS65086_PG_DELAY1 0x43
+#define TPS65086_FORCESHUTDN 0x91
+#define TPS65086_BUCK1SLPCTRL 0x92
+#define TPS65086_BUCK2SLPCTRL 0x93
+#define TPS65086_BUCK4VID 0x94
+#define TPS65086_BUCK4SLPVID 0x95
+#define TPS65086_BUCK5VID 0x96
+#define TPS65086_BUCK5SLPVID 0x97
+#define TPS65086_BUCK6VID 0x98
+#define TPS65086_BUCK6SLPVID 0x99
+#define TPS65086_LDOA2VID 0x9A
+#define TPS65086_LDOA3VID 0x9B
+#define TPS65086_BUCK123CTRL 0x9C
+#define TPS65086_PG_DELAY2 0x9D
+#define TPS65086_PIN_EN_MASK1 0x9E
+#define TPS65086_PIN_EN_MASK2 0x9F
+#define TPS65086_SWVTT_EN 0x9F
+#define TPS65086_PIN_EN_OVR1 0xA0
+#define TPS65086_PIN_EN_OVR2 0xA1
+#define TPS65086_GPOCTRL 0xA1
+#define TPS65086_PWR_FAULT_MASK1 0xA2
+#define TPS65086_PWR_FAULT_MASK2 0xA3
+#define TPS65086_GPO1PG_CTRL1 0xA4
+#define TPS65086_GPO1PG_CTRL2 0xA5
+#define TPS65086_GPO4PG_CTRL1 0xA6
+#define TPS65086_GPO4PG_CTRL2 0xA7
+#define TPS65086_GPO2PG_CTRL1 0xA8
+#define TPS65086_GPO2PG_CTRL2 0xA9
+#define TPS65086_GPO3PG_CTRL1 0xAA
+#define TPS65086_GPO3PG_CTRL2 0xAB
+#define TPS65086_LDOA1CTRL 0xAE
+#define TPS65086_PG_STATUS1 0xB0
+#define TPS65086_PG_STATUS2 0xB1
+#define TPS65086_PWR_FAULT_STATUS1 0xB2
+#define TPS65086_PWR_FAULT_STATUS2 0xB3
+#define TPS65086_TEMPCRIT 0xB4
+#define TPS65086_TEMPHOT 0xB5
+#define TPS65086_OC_STATUS 0xB6
+
+/* IRQ Register field definitions */
+#define TPS65086_IRQ_DIETEMP_MASK BIT(0)
+#define TPS65086_IRQ_SHUTDN_MASK BIT(3)
+#define TPS65086_IRQ_FAULT_MASK BIT(7)
+
+/* DEVICEID Register field definitions */
+#define TPS65086_DEVICEID_PART_MASK GENMASK(3, 0)
+#define TPS65086_DEVICEID_OTP_MASK GENMASK(5, 4)
+#define TPS65086_DEVICEID_REV_MASK GENMASK(7, 6)
+
+/* VID Masks */
+#define BUCK_VID_MASK GENMASK(7, 1)
+#define VDOA1_VID_MASK GENMASK(4, 1)
+#define VDOA23_VID_MASK GENMASK(3, 0)
+
+/* Define the TPS65086 IRQ numbers */
+enum tps65086_irqs {
+ TPS65086_IRQ_DIETEMP,
+ TPS65086_IRQ_SHUTDN,
+ TPS65086_IRQ_FAULT,
+};
+
+/**
+ * struct tps65086 - state holder for the tps65086 driver
+ *
+ * Device data may be used to access the TPS65086 chip
+ */
+struct tps65086 {
+ struct device *dev;
+ struct regmap *regmap;
+
+ /* IRQ Data */
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif /* __LINUX_MFD_TPS65086_H */
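A short sketch of decoding the DEVICEID register with the field masks defined above; "tps" is an already-initialised struct tps65086 and hypothetical_read_id() is a stand-in name.

static int hypothetical_read_id(struct tps65086 *tps,
				unsigned int *part, unsigned int *rev)
{
	unsigned int val;
	int ret;

	ret = regmap_read(tps->regmap, TPS65086_DEVICEID, &val);
	if (ret)
		return ret;

	*part = val & TPS65086_DEVICEID_PART_MASK;		/* bits 3:0 */
	*rev = (val & TPS65086_DEVICEID_REV_MASK) >> 6;		/* bits 7:6 */

	return 0;
}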
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
index 0bf2708df150..67d144b3b8f9 100644
--- a/include/linux/mfd/tps65090.h
+++ b/include/linux/mfd/tps65090.h
@@ -77,6 +77,11 @@ enum {
#define TPS65090_REG_CG_CTRL5 0x09
#define TPS65090_REG_CG_STATUS1 0x0a
#define TPS65090_REG_CG_STATUS2 0x0b
+#define TPS65090_REG_AD_OUT1 0x17
+#define TPS65090_REG_AD_OUT2 0x18
+
+#define TPS65090_MAX_REG TPS65090_REG_AD_OUT2
+#define TPS65090_NUM_REGS (TPS65090_MAX_REG + 1)
struct tps65090 {
struct device *dev;
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index 6d309032dc0d..1a603701550e 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -1,28 +1,27 @@
/*
- * tps65912.h -- TI TPS6591x
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*
- * Copyright 2011 Texas Instruments Inc.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * Author: Margarita Olaya <magi@slimlogic.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
*
+ * Based on the TPS65218 driver and the previous TPS65912 driver by
+ * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
#ifndef __LINUX_MFD_TPS65912_H
#define __LINUX_MFD_TPS65912_H
-/* TPS regulator type list */
-#define REGULATOR_LDO 0
-#define REGULATOR_DCDC 1
-
-/*
- * List of registers for TPS65912
- */
+#include <linux/device.h>
+#include <linux/regmap.h>
+/* List of registers for TPS65912 */
#define TPS65912_DCDC1_CTRL 0x00
#define TPS65912_DCDC2_CTRL 0x01
#define TPS65912_DCDC3_CTRL 0x02
@@ -126,41 +125,45 @@
#define TPS65912_VERNUM 0x64
#define TPS6591X_MAX_REGISTER 0x64
-/* IRQ Definitions */
-#define TPS65912_IRQ_PWRHOLD_F 0
-#define TPS65912_IRQ_VMON 1
-#define TPS65912_IRQ_PWRON 2
-#define TPS65912_IRQ_PWRON_LP 3
-#define TPS65912_IRQ_PWRHOLD_R 4
-#define TPS65912_IRQ_HOTDIE 5
-#define TPS65912_IRQ_GPIO1_R 6
-#define TPS65912_IRQ_GPIO1_F 7
-#define TPS65912_IRQ_GPIO2_R 8
-#define TPS65912_IRQ_GPIO2_F 9
-#define TPS65912_IRQ_GPIO3_R 10
-#define TPS65912_IRQ_GPIO3_F 11
-#define TPS65912_IRQ_GPIO4_R 12
-#define TPS65912_IRQ_GPIO4_F 13
-#define TPS65912_IRQ_GPIO5_R 14
-#define TPS65912_IRQ_GPIO5_F 15
-#define TPS65912_IRQ_PGOOD_DCDC1 16
-#define TPS65912_IRQ_PGOOD_DCDC2 17
-#define TPS65912_IRQ_PGOOD_DCDC3 18
-#define TPS65912_IRQ_PGOOD_DCDC4 19
-#define TPS65912_IRQ_PGOOD_LDO1 20
-#define TPS65912_IRQ_PGOOD_LDO2 21
-#define TPS65912_IRQ_PGOOD_LDO3 22
-#define TPS65912_IRQ_PGOOD_LDO4 23
-#define TPS65912_IRQ_PGOOD_LDO5 24
-#define TPS65912_IRQ_PGOOD_LDO6 25
-#define TPS65912_IRQ_PGOOD_LDO7 26
-#define TPS65912_IRQ_PGOOD_LD08 27
-#define TPS65912_IRQ_PGOOD_LDO9 28
-#define TPS65912_IRQ_PGOOD_LDO10 29
+/* INT_STS Register field definitions */
+#define TPS65912_INT_STS_PWRHOLD_F BIT(0)
+#define TPS65912_INT_STS_VMON BIT(1)
+#define TPS65912_INT_STS_PWRON BIT(2)
+#define TPS65912_INT_STS_PWRON_LP BIT(3)
+#define TPS65912_INT_STS_PWRHOLD_R BIT(4)
+#define TPS65912_INT_STS_HOTDIE BIT(5)
+#define TPS65912_INT_STS_GPIO1_R BIT(6)
+#define TPS65912_INT_STS_GPIO1_F BIT(7)
+
+/* INT_STS2 Register field definitions */
+#define TPS65912_INT_STS2_GPIO2_R BIT(0)
+#define TPS65912_INT_STS2_GPIO2_F BIT(1)
+#define TPS65912_INT_STS2_GPIO3_R BIT(2)
+#define TPS65912_INT_STS2_GPIO3_F BIT(3)
+#define TPS65912_INT_STS2_GPIO4_R BIT(4)
+#define TPS65912_INT_STS2_GPIO4_F BIT(5)
+#define TPS65912_INT_STS2_GPIO5_R BIT(6)
+#define TPS65912_INT_STS2_GPIO5_F BIT(7)
-#define TPS65912_NUM_IRQ 30
+/* INT_STS3 Register field definitions */
+#define TPS65912_INT_STS3_PGOOD_DCDC1 BIT(0)
+#define TPS65912_INT_STS3_PGOOD_DCDC2 BIT(1)
+#define TPS65912_INT_STS3_PGOOD_DCDC3 BIT(2)
+#define TPS65912_INT_STS3_PGOOD_DCDC4 BIT(3)
+#define TPS65912_INT_STS3_PGOOD_LDO1 BIT(4)
+#define TPS65912_INT_STS3_PGOOD_LDO2 BIT(5)
+#define TPS65912_INT_STS3_PGOOD_LDO3 BIT(6)
+#define TPS65912_INT_STS3_PGOOD_LDO4 BIT(7)
-/* GPIO 1 and 2 Register Definitions */
+/* INT_STS4 Register field definitions */
+#define TPS65912_INT_STS4_PGOOD_LDO5 BIT(0)
+#define TPS65912_INT_STS4_PGOOD_LDO6 BIT(1)
+#define TPS65912_INT_STS4_PGOOD_LDO7 BIT(2)
+#define TPS65912_INT_STS4_PGOOD_LDO8 BIT(3)
+#define TPS65912_INT_STS4_PGOOD_LDO9 BIT(4)
+#define TPS65912_INT_STS4_PGOOD_LDO10 BIT(5)
+
+/* GPIO 1 and 2 Register field definitions */
#define GPIO_SLEEP_MASK 0x80
#define GPIO_SLEEP_SHIFT 7
#define GPIO_DEB_MASK 0x10
@@ -172,7 +175,7 @@
#define GPIO_SET_MASK 0x01
#define GPIO_SET_SHIFT 0
-/* GPIO 3 Register Definitions */
+/* GPIO 3 Register field definitions */
#define GPIO3_SLEEP_MASK 0x80
#define GPIO3_SLEEP_SHIFT 7
#define GPIO3_SEL_MASK 0x40
@@ -190,7 +193,7 @@
#define GPIO3_SET_MASK 0x01
#define GPIO3_SET_SHIFT 0
-/* GPIO 4 Register Definitions */
+/* GPIO 4 Register field definitions */
#define GPIO4_SLEEP_MASK 0x80
#define GPIO4_SLEEP_SHIFT 7
#define GPIO4_SEL_MASK 0x40
@@ -264,65 +267,75 @@
#define DCDC_LIMIT_MAX_SEL_MASK 0x3F
#define DCDC_LIMIT_MAX_SEL_SHIFT 0
-/**
- * struct tps65912_board
- * Board platform dat may be used to initialize regulators.
- */
-struct tps65912_board {
- int is_dcdc1_avs;
- int is_dcdc2_avs;
- int is_dcdc3_avs;
- int is_dcdc4_avs;
- int irq;
- int irq_base;
- int gpio_base;
- struct regulator_init_data *tps65912_pmic_init_data;
+/* Define the TPS65912 IRQ numbers */
+enum tps65912_irqs {
+ /* INT_STS registers */
+ TPS65912_IRQ_PWRHOLD_F,
+ TPS65912_IRQ_VMON,
+ TPS65912_IRQ_PWRON,
+ TPS65912_IRQ_PWRON_LP,
+ TPS65912_IRQ_PWRHOLD_R,
+ TPS65912_IRQ_HOTDIE,
+ TPS65912_IRQ_GPIO1_R,
+ TPS65912_IRQ_GPIO1_F,
+ /* INT_STS2 registers */
+ TPS65912_IRQ_GPIO2_R,
+ TPS65912_IRQ_GPIO2_F,
+ TPS65912_IRQ_GPIO3_R,
+ TPS65912_IRQ_GPIO3_F,
+ TPS65912_IRQ_GPIO4_R,
+ TPS65912_IRQ_GPIO4_F,
+ TPS65912_IRQ_GPIO5_R,
+ TPS65912_IRQ_GPIO5_F,
+ /* INT_STS3 registers */
+ TPS65912_IRQ_PGOOD_DCDC1,
+ TPS65912_IRQ_PGOOD_DCDC2,
+ TPS65912_IRQ_PGOOD_DCDC3,
+ TPS65912_IRQ_PGOOD_DCDC4,
+ TPS65912_IRQ_PGOOD_LDO1,
+ TPS65912_IRQ_PGOOD_LDO2,
+ TPS65912_IRQ_PGOOD_LDO3,
+ TPS65912_IRQ_PGOOD_LDO4,
+ /* INT_STS4 registers */
+ TPS65912_IRQ_PGOOD_LDO5,
+ TPS65912_IRQ_PGOOD_LDO6,
+ TPS65912_IRQ_PGOOD_LDO7,
+ TPS65912_IRQ_PGOOD_LDO8,
+ TPS65912_IRQ_PGOOD_LDO9,
+ TPS65912_IRQ_PGOOD_LDO10,
};
-/**
- * struct tps65912 - tps65912 sub-driver chip access routines
+/*
+ * struct tps65912 - state holder for the tps65912 driver
+ *
+ * Device data may be used to access the TPS65912 chip
*/
-
struct tps65912 {
struct device *dev;
- /* for read/write acces */
- struct mutex io_mutex;
-
- /* For device IO interfaces: I2C or SPI */
- void *control_data;
-
- int (*read)(struct tps65912 *tps65912, u8 reg, int size, void *dest);
- int (*write)(struct tps65912 *tps65912, u8 reg, int size, void *src);
-
- /* Client devices */
- struct tps65912_pmic *pmic;
+ struct regmap *regmap;
- /* GPIO Handling */
- struct gpio_chip gpio;
+ /* IRQ Data */
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+};
- /* IRQ Handling */
- struct mutex irq_lock;
- int chip_irq;
- int irq_base;
- int irq_num;
- u32 irq_mask;
+static const struct regmap_range tps65912_yes_ranges[] = {
+ regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5),
};
-struct tps65912_platform_data {
- int irq;
- int irq_base;
+static const struct regmap_access_table tps65912_volatile_table = {
+ .yes_ranges = tps65912_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges),
};
-unsigned int tps_chip(void);
+static const struct regmap_config tps65912_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &tps65912_volatile_table,
+};
-int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask);
-int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask);
-int tps65912_reg_read(struct tps65912 *tps65912, u8 reg);
-int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val);
-int tps65912_device_init(struct tps65912 *tps65912);
-void tps65912_device_exit(struct tps65912 *tps65912);
-int tps65912_irq_init(struct tps65912 *tps65912, int irq,
- struct tps65912_platform_data *pdata);
-int tps65912_irq_exit(struct tps65912 *tps65912);
+int tps65912_device_init(struct tps65912 *tps);
+int tps65912_device_exit(struct tps65912 *tps);
#endif /* __LINUX_MFD_TPS65912_H */
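A sketch of the bus glue the rewritten header implies: the I2C (or SPI) probe builds a regmap from tps65912_regmap_config and hands the filled-in struct tps65912 to tps65912_device_init(). hypothetical_tps65912_i2c_probe() is a stand-in name and driver registration boilerplate is omitted.

static int hypothetical_tps65912_i2c_probe(struct i2c_client *client,
					   const struct i2c_device_id *id)
{
	struct tps65912 *tps;

	tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
	if (!tps)
		return -ENOMEM;

	i2c_set_clientdata(client, tps);
	tps->dev = &client->dev;
	tps->irq = client->irq;

	/* register cache and volatile ranges come from the header above */
	tps->regmap = devm_regmap_init_i2c(client, &tps65912_regmap_config);
	if (IS_ERR(tps->regmap))
		return PTR_ERR(tps->regmap);

	return tps65912_device_init(tps);
}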
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index cac1c0904d5f..9b50325e4ddf 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -23,9 +23,13 @@ enum migrate_reason {
MR_SYSCALL, /* also applies to cpusets */
MR_MEMPOLICY_MBIND,
MR_NUMA_MISPLACED,
- MR_CMA
+ MR_CMA,
+ MR_TYPES
};
+/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
+extern char *migrate_reason_names[MR_TYPES];
+
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
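A small hedged example of the intended use of migrate_reason_names: translating the enum migrate_reason value carried by a tracepoint or debug dump into text, with MR_TYPES as the bounds check. hypothetical_reason_name() is a made-up helper.

static const char *hypothetical_reason_name(int reason)
{
	/* stay inside the table; MR_TYPES marks one past the last valid reason */
	if (reason < 0 || reason >= MR_TYPES)
		return "unknown";

	return migrate_reason_names[reason];
}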
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a0e8cc8dcc67..8541a913f6a3 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -219,6 +219,7 @@ enum {
MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31,
MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
+ MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
};
enum {
@@ -1160,6 +1161,8 @@ enum mlx4_net_trans_promisc_mode {
MLX4_FS_REGULAR = 1,
MLX4_FS_ALL_DEFAULT,
MLX4_FS_MC_DEFAULT,
+ MLX4_FS_MIRROR_RX_PORT,
+ MLX4_FS_MIRROR_SX_PORT,
MLX4_FS_UC_SNIFFER,
MLX4_FS_MC_SNIFFER,
MLX4_FS_MODE_NUM, /* should be last */
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 2e8af001c5da..bd0e7075ea6d 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -33,6 +33,7 @@
#ifndef MLX4_DRIVER_H
#define MLX4_DRIVER_H
+#include <net/devlink.h>
#include <linux/mlx4/device.h>
struct mlx4_dev;
@@ -89,6 +90,8 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
+
static inline u64 mlx4_mac_to_u64(u8 *addr)
{
u64 mac = 0;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 987764afa65c..8156e3c9239c 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -105,6 +105,29 @@ __mlx5_mask(typ, fld))
___t; \
})
+/* Big endian getters */
+#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
+ __mlx5_64_off(typ, fld)))
+
+#define MLX5_GET_BE(type_t, typ, p, fld) ({ \
+ type_t tmp; \
+ switch (sizeof(tmp)) { \
+ case sizeof(u8): \
+ tmp = (__force type_t)MLX5_GET(typ, p, fld); \
+ break; \
+ case sizeof(u16): \
+ tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
+ break; \
+ case sizeof(u32): \
+ tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
+ break; \
+ case sizeof(u64): \
+ tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
+ break; \
+ } \
+ tmp; \
+ })
+
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -351,6 +374,12 @@ enum {
};
enum {
+ MLX5_BW_NO_LIMIT = 0,
+ MLX5_100_MBPS_UNIT = 3,
+ MLX5_GBPS_UNIT = 4,
+};
+
+enum {
MLX5_MAX_PAGE_SHIFT = 31
};
@@ -1177,6 +1206,17 @@ enum {
MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
};
+enum mlx5_wol_mode {
+ MLX5_WOL_DISABLE = 0,
+ MLX5_WOL_SECURED_MAGIC = 1 << 1,
+ MLX5_WOL_MAGIC = 1 << 2,
+ MLX5_WOL_ARP = 1 << 3,
+ MLX5_WOL_BROADCAST = 1 << 4,
+ MLX5_WOL_MULTICAST = 1 << 5,
+ MLX5_WOL_UNICAST = 1 << 6,
+ MLX5_WOL_PHY_ACTIVITY = 1 << 7,
+};
+
/* MLX5 DEV CAPs */
/* TODO: EAT.ME */
@@ -1196,6 +1236,8 @@ enum mlx5_cap_type {
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
+ MLX5_CAP_RESERVED,
+ MLX5_CAP_VECTOR_CALC,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1258,6 +1300,10 @@ enum mlx5_cap_type {
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
+ MLX5_GET(vector_calc_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
@@ -1284,7 +1330,8 @@ enum {
MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
- MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
+ MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
+ MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
@@ -1294,6 +1341,11 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
-#define MLX5_BY_PASS_NUM_PRIOS 9
+#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
+#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
+#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
+#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
+ MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
+ MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
#endif /* MLX5_DEVICE_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 1e3006dcf35d..dcd5ac8d3b14 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -54,7 +54,7 @@ enum {
/* one minute for the sake of bringup. Generally, commands must always
* complete and we may need to increase this timeout value
*/
- MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000,
+ MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -99,6 +99,8 @@ enum {
};
enum {
+ MLX5_REG_QETCR = 0x4005,
+ MLX5_REG_QTCT = 0x400a,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -338,7 +340,7 @@ struct mlx5_core_sig_ctx {
u32 sigerr_count;
};
-struct mlx5_core_mr {
+struct mlx5_core_mkey {
u64 iova;
u64 size;
u32 key;
@@ -426,7 +428,7 @@ struct mlx5_srq_table {
struct radix_tree_root tree;
};
-struct mlx5_mr_table {
+struct mlx5_mkey_table {
/* protect radix tree
*/
rwlock_t lock;
@@ -458,8 +460,6 @@ struct mlx5_priv {
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
- struct io_mapping *bf_mapping;
-
/* pages stuff */
struct workqueue_struct *pg_wq;
struct rb_root page_root;
@@ -484,9 +484,9 @@ struct mlx5_priv {
struct mlx5_cq_table cq_table;
/* end: cq staff */
- /* start: mr staff */
- struct mlx5_mr_table mr_table;
- /* end: mr staff */
+ /* start: mkey staff */
+ struct mlx5_mkey_table mkey_table;
+ /* end: mkey staff */
/* start: alloc staff */
/* protect buffer alocation according to numa node */
@@ -613,7 +613,10 @@ struct mlx5_pas {
};
enum port_state_policy {
- MLX5_AAA_000
+ MLX5_POLICY_DOWN = 0,
+ MLX5_POLICY_UP = 1,
+ MLX5_POLICY_FOLLOW = 2,
+ MLX5_POLICY_INVALID = 0xffffffff
};
enum phy_port_state {
@@ -706,8 +709,7 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
- enum mlx5_cap_mode cap_mode);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -717,7 +719,8 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
+ bool map_wc);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
@@ -739,16 +742,18 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
-void mlx5_init_mr_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
struct mlx5_create_mkey_mbox_in *in, int inlen,
mlx5_cmd_cbk_t callback, void *context,
struct mlx5_create_mkey_mbox_out *out);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
struct mlx5_query_mkey_mbox_out *out, int outlen);
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
@@ -794,37 +799,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
-int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
- int ptys_size, int proto_mask, u8 local_port);
-int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
- u32 *proto_cap, int proto_mask);
-int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
- u32 *proto_admin, int proto_mask);
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
- u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, int proto_mask,
- u8 local_port);
-int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
- int proto_mask);
-int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status);
-int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status *status);
-
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
- u8 port);
-
-int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
- u8 *vl_hw_cap, u8 local_port);
-
-int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
-int mlx5_query_port_pause(struct mlx5_core_dev *dev,
- u32 *rx_pause, u32 *tx_pause);
-
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
@@ -847,6 +821,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
struct mlx5_odp_caps *odp_caps);
+int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
+ u8 port_num, void *out, size_t sz);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 8230caa3fb6e..8dec5508d93d 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -38,6 +38,10 @@
#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+enum {
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
+};
+
#define LEFTOVERS_RULE_NUM 2
static inline void build_leftovers_ft_param(int *priority,
int *n_ent,
@@ -52,6 +56,7 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
+ MLX5_FLOW_NAMESPACE_ANCHOR,
MLX5_FLOW_NAMESPACE_FDB,
};
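For illustration (not applied by this diff), a consumer picks one of these namespaces before creating flow tables; a minimal sketch, assuming the mlx5_get_flow_namespace() lookup declared elsewhere in this header:

/* Illustrative only: grab the bypass namespace before adding user rules. */
static struct mlx5_flow_namespace *get_bypass_ns(struct mlx5_core_dev *dev)
{
	return mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_BYPASS);
}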
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 51f1e540fc2b..c15b8a864937 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -166,6 +166,8 @@ enum {
MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
+ MLX5_CMD_OP_SET_WOL_ROL = 0x830,
+ MLX5_CMD_OP_QUERY_WOL_ROL = 0x831,
MLX5_CMD_OP_CREATE_TIR = 0x900,
MLX5_CMD_OP_MODIFY_TIR = 0x901,
MLX5_CMD_OP_DESTROY_TIR = 0x902,
@@ -458,7 +460,8 @@ struct mlx5_ifc_ads_bits {
};
struct mlx5_ifc_flow_table_nic_cap_bits {
- u8 reserved_at_0[0x200];
+ u8 nic_rx_multi_path_tirs[0x1];
+ u8 reserved_at_1[0x1ff];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
@@ -615,6 +618,33 @@ struct mlx5_ifc_odp_cap_bits {
u8 reserved_at_e0[0x720];
};
+struct mlx5_ifc_calc_op {
+ u8 reserved_at_0[0x10];
+ u8 reserved_at_10[0x9];
+ u8 op_swap_endianness[0x1];
+ u8 op_min[0x1];
+ u8 op_xor[0x1];
+ u8 op_or[0x1];
+ u8 op_and[0x1];
+ u8 op_max[0x1];
+ u8 op_add[0x1];
+};
+
+struct mlx5_ifc_vector_calc_cap_bits {
+ u8 calc_matrix[0x1];
+ u8 reserved_at_1[0x1f];
+ u8 reserved_at_20[0x8];
+ u8 max_vec_count[0x8];
+ u8 reserved_at_30[0xd];
+ u8 max_chunk_size[0x3];
+ struct mlx5_ifc_calc_op calc0;
+ struct mlx5_ifc_calc_op calc1;
+ struct mlx5_ifc_calc_op calc2;
+ struct mlx5_ifc_calc_op calc3;
+
+ u8 reserved_at_e0[0x720];
+};
+
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
@@ -729,14 +759,28 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_1bf[0x3];
u8 log_max_msg[0x5];
- u8 reserved_at_1c7[0x18];
+ u8 reserved_at_1c7[0x4];
+ u8 max_tc[0x4];
+ u8 reserved_at_1cf[0x6];
+ u8 rol_s[0x1];
+ u8 rol_g[0x1];
+ u8 reserved_at_1d7[0x1];
+ u8 wol_s[0x1];
+ u8 wol_g[0x1];
+ u8 wol_a[0x1];
+ u8 wol_b[0x1];
+ u8 wol_m[0x1];
+ u8 wol_u[0x1];
+ u8 wol_p[0x1];
u8 stat_rate_support[0x10];
u8 reserved_at_1ef[0xc];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
- u8 reserved_at_200[0xe];
+ u8 reserved_at_200[0x3];
+ u8 ipoib_basic_offloads[0x1];
+ u8 reserved_at_204[0xa];
u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
@@ -767,10 +811,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cd[0x1];
u8 reserved_at_22c[0x1];
u8 apm[0x1];
- u8 reserved_at_22e[0x7];
+ u8 vector_calc[0x1];
+ u8 reserved_at_22f[0x1];
+ u8 imaicl[0x1];
+ u8 reserved_at_231[0x4];
u8 qkv[0x1];
u8 pkv[0x1];
- u8 reserved_at_237[0x4];
+ u8 set_deth_sqpn[0x1];
+ u8 reserved_at_239[0x3];
u8 xrc[0x1];
u8 ud[0x1];
u8 uc[0x1];
@@ -1208,6 +1256,36 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
u8 reserved_at_640[0x180];
};
+struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
+ u8 symbol_error_counter[0x10];
+
+ u8 link_error_recovery_counter[0x8];
+
+ u8 link_downed_counter[0x8];
+
+ u8 port_rcv_errors[0x10];
+
+ u8 port_rcv_remote_physical_errors[0x10];
+
+ u8 port_rcv_switch_relay_errors[0x10];
+
+ u8 port_xmit_discards[0x10];
+
+ u8 port_xmit_constraint_errors[0x8];
+
+ u8 port_rcv_constraint_errors[0x8];
+
+ u8 reserved_at_70[0x8];
+
+ u8 link_overrun_errors[0x8];
+
+ u8 reserved_at_80[0x10];
+
+ u8 vl_15_dropped[0x10];
+
+ u8 reserved_at_a0[0xa0];
+};
+
struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
u8 transmit_queue_high[0x20];
@@ -1780,7 +1858,7 @@ struct mlx5_ifc_qpc_bits {
u8 log_sq_size[0x4];
u8 reserved_at_55[0x6];
u8 rlky[0x1];
- u8 reserved_at_5c[0x4];
+ u8 ulp_stateless_offload_mode[0x4];
u8 counter_set_id[0x8];
u8 uar_page[0x18];
@@ -1904,6 +1982,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+ struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
u8 reserved_at_0[0x8000];
};
@@ -2618,6 +2697,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
u8 reserved_at_0[0x7c0];
};
@@ -3126,7 +3206,8 @@ struct mlx5_ifc_query_vport_counter_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 reserved_at_41[0xb];
+ u8 port_num[0x4];
u8 vport_number[0x10];
u8 reserved_at_60[0x60];
@@ -3629,6 +3710,12 @@ struct mlx5_ifc_query_hca_vport_pkey_in_bits {
u8 pkey_index[0x10];
};
+enum {
+ MLX5_HCA_VPORT_SEL_PORT_GUID = 1 << 0,
+ MLX5_HCA_VPORT_SEL_NODE_GUID = 1 << 1,
+ MLX5_HCA_VPORT_SEL_STATE_POLICY = 1 << 2,
+};
+
struct mlx5_ifc_query_hca_vport_gid_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4245,7 +4332,9 @@ struct mlx5_ifc_modify_tir_bitmask_bits {
u8 reserved_at_20[0x1b];
u8 self_lb_en[0x1];
- u8 reserved_at_3c[0x3];
+ u8 reserved_at_3c[0x1];
+ u8 hash[0x1];
+ u8 reserved_at_3e[0x1];
u8 lro[0x1];
};
@@ -6871,6 +6960,54 @@ struct mlx5_ifc_mtt_bits {
u8 rd_en[0x1];
};
+struct mlx5_ifc_query_wol_rol_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 rol_mode[0x8];
+ u8 wol_mode[0x8];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_wol_rol_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_wol_rol_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_wol_rol_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 rol_mode_valid[0x1];
+ u8 wol_mode_valid[0x1];
+ u8 reserved_at_42[0xe];
+ u8 rol_mode[0x8];
+ u8 wol_mode[0x8];
+
+ u8 reserved_at_60[0x20];
+};
+
enum {
MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
@@ -6954,6 +7091,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_peir_reg_bits peir_reg;
struct mlx5_ifc_pelc_reg_bits pelc_reg;
struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
struct mlx5_ifc_pifr_reg_bits pifr_reg;
struct mlx5_ifc_pipg_reg_bits pipg_reg;
@@ -7061,4 +7199,49 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 reserved_at_100[0x100];
};
+struct mlx5_ifc_ets_tcn_config_reg_bits {
+ u8 g[0x1];
+ u8 b[0x1];
+ u8 r[0x1];
+ u8 reserved_at_3[0x9];
+ u8 group[0x4];
+ u8 reserved_at_10[0x9];
+ u8 bw_allocation[0x7];
+
+ u8 reserved_at_20[0xc];
+ u8 max_bw_units[0x4];
+ u8 reserved_at_30[0x8];
+ u8 max_bw_value[0x8];
+};
+
+struct mlx5_ifc_ets_global_config_reg_bits {
+ u8 reserved_at_0[0x2];
+ u8 r[0x1];
+ u8 reserved_at_3[0x1d];
+
+ u8 reserved_at_20[0xc];
+ u8 max_bw_units[0x4];
+ u8 reserved_at_30[0x8];
+ u8 max_bw_value[0x8];
+};
+
+struct mlx5_ifc_qetc_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 port_number[0x8];
+ u8 reserved_at_10[0x30];
+
+ struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8];
+ struct mlx5_ifc_ets_global_config_reg_bits global_configuration;
+};
+
+struct mlx5_ifc_qtct_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 port_number[0x8];
+ u8 reserved_at_10[0xd];
+ u8 prio[0x3];
+
+ u8 reserved_at_20[0x1d];
+ u8 tclass[0x3];
+};
+
#endif /* MLX5_IFC_H */
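The QETC/QTCT layouts above pair with the MLX5_REG_QETCR/MLX5_REG_QTCT register ids added to driver.h earlier in this patch. As a rough sketch (illustrative, not part of the diff), mapping one priority to a traffic class through the access-register command could look like this, using the MLX5_SET/MLX5_ST_SZ_DW helpers from device.h:

/* Sketch: map one priority to a traffic class via the QTCT register. */
static int set_prio_tc(struct mlx5_core_dev *mdev, u8 prio, u8 tclass)
{
	u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0};
	u32 out[MLX5_ST_SZ_DW(qtct_reg)];

	MLX5_SET(qtct_reg, in, prio, prio);
	MLX5_SET(qtct_reg, in, tclass, tclass);

	return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				    MLX5_REG_QTCT, 0, 1 /* write */);
}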
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
new file mode 100644
index 000000000000..a1d145abd4eb
--- /dev/null
+++ b/include/linux/mlx5/port.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_PORT_H__
+#define __MLX5_PORT_H__
+
+#include <linux/mlx5/driver.h>
+
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask, u8 local_port);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask);
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port);
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port);
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 port);
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+ u8 *pfc_en_rx);
+
+int mlx5_max_tc(struct mlx5_core_dev *mdev);
+
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+
+#endif /* __MLX5_PORT_H__ */
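A caller of the new port helpers only needs the core device handle; a minimal, illustrative sketch of reading back the wake-on-LAN mode (error handling trimmed, wol_mode encoding as defined by the firmware):

/* Sketch: report the configured wake-on-LAN mode. */
static int report_wol(struct mlx5_core_dev *mdev)
{
	u8 wol_mode;
	int err;

	err = mlx5_query_port_wol(mdev, &wol_mode);
	if (err)
		return err;

	pr_info("mlx5 port WoL mode: 0x%x\n", wol_mode);
	return 0;
}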
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 5b8c89ffaa58..cf031a3f16c5 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -499,7 +499,8 @@ struct mlx5_qp_context {
u8 reserved2[4];
__be32 next_send_psn;
__be32 cqn_send;
- u8 reserved3[8];
+ __be32 deth_sqpn;
+ u8 reserved3[4];
__be32 last_acked_psn;
__be32 ssn;
__be32 params2;
@@ -621,9 +622,9 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}
-static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
+static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
{
- return radix_tree_lookup(&dev->priv.mr_table.tree, key);
+ return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
struct mlx5_page_fault_resume_mbox_in {
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 123771003e68..bd93e6323603 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -92,5 +92,12 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
+ int vf, u8 port_num, void *out,
+ size_t out_sz);
+int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ int vf,
+ struct mlx5_hca_vport_context *req);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 516e14944339..ed6407d1b7b5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -22,6 +22,7 @@
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
+#include <linux/page_ref.h>
struct mempolicy;
struct anon_vma;
@@ -82,6 +83,27 @@ extern int mmap_rnd_compat_bits __read_mostly;
#define mm_forbids_zeropage(X) (0)
#endif
+/*
+ * Default maximum number of active map areas; this limits the number of vmas
+ * per mm struct. Users can overwrite this number via sysctl, but there is a
+ * caveat.
+ *
+ * When a program's coredump is generated in ELF format, one section is created
+ * per vma. In ELF, the number of sections is represented as an unsigned short,
+ * so it must stay below 65535 at coredump time. Because the kernel adds some
+ * informative sections to the program image when generating the coredump, we
+ * need some margin. The number of extra sections is currently 1-3, depending
+ * on the arch, so we use 5 as a safe margin here.
+ *
+ * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
+ * is no longer a hard limit, although some userspace tools can be surprised by
+ * that.
+ */
+#define MAPCOUNT_ELF_CORE_MARGIN (5)
+#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+extern int sysctl_max_map_count;
+
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
@@ -122,6 +144,7 @@ extern unsigned int kobjsize(const void *objp);
/*
* vm_flags in vm_area_struct, see mm_types.h.
+ * When changing, update also include/trace/events/mmflags.h
*/
#define VM_NONE 0x00000000
@@ -170,8 +193,26 @@ extern unsigned int kobjsize(const void *objp);
#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
+#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
+#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
+#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
+#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
+#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
+#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
+
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
+#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
+# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
+# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
+# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
+# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
+# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
+#endif
#elif defined(CONFIG_PPC)
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
@@ -233,6 +274,8 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED 0x20 /* Second try */
#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
+#define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */
+#define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */
/*
* vm_fault is filled by the pagefault handler and passed to the vma's
@@ -364,8 +407,8 @@ static inline int pmd_devmap(pmd_t pmd)
*/
static inline int put_page_testzero(struct page *page)
{
- VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
- return atomic_dec_and_test(&page->_count);
+ VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
+ return page_ref_dec_and_test(page);
}
/*
@@ -376,7 +419,7 @@ static inline int put_page_testzero(struct page *page)
*/
static inline int get_page_unless_zero(struct page *page)
{
- return atomic_inc_not_zero(&page->_count);
+ return page_ref_add_unless(page, 1, 0);
}
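The conversion pattern is mechanical: direct atomic_*() accesses to page->_count become the corresponding page_ref_*() helpers from the newly included <linux/page_ref.h>. An illustrative sketch (not from the patch):

/* Take and drop one extra reference without touching page->_count directly. */
static void hold_and_release(struct page *page)
{
	page_ref_inc(page);			/* was atomic_inc(&page->_count) */
	if (page_ref_dec_and_test(page))	/* was atomic_dec_and_test()     */
		__put_page(page);
}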
extern int page_is_ram(unsigned long pfn);
@@ -387,7 +430,8 @@ enum {
REGION_MIXED,
};
-int region_intersects(resource_size_t offset, size_t size, const char *type);
+int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
+ unsigned long desc);
/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
@@ -463,11 +507,6 @@ static inline int total_mapcount(struct page *page)
}
#endif
-static inline int page_count(struct page *page)
-{
- return atomic_read(&compound_head(page)->_count);
-}
-
static inline struct page *virt_to_head_page(const void *x)
{
struct page *page = virt_to_page(x);
@@ -475,15 +514,6 @@ static inline struct page *virt_to_head_page(const void *x)
return compound_head(page);
}
-/*
- * Setup the page count before being freed into the page allocator for
- * the first time (boot or memory hotplug)
- */
-static inline void init_page_count(struct page *page)
-{
- atomic_set(&page->_count, 1);
-}
-
void __put_page(struct page *page);
void put_pages_list(struct list_head *pages);
@@ -693,8 +723,8 @@ static inline void get_page(struct page *page)
* Getting a normal page or the head of a compound page
* requires to already have an elevated page->_count.
*/
- VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
- atomic_inc(&page->_count);
+ VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+ page_ref_inc(page);
if (unlikely(is_zone_device_page(page)))
get_zone_device_page(page);
@@ -904,20 +934,11 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
{
return page->mem_cgroup;
}
-
-static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
-{
- page->mem_cgroup = memcg;
-}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
return NULL;
}
-
-static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
-{
-}
#endif
/*
@@ -1051,8 +1072,6 @@ static inline void clear_page_pfmemalloc(struct page *page)
* just gets major/minor fault counters bumped up.
*/
-#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */
-
#define VM_FAULT_OOM 0x0001
#define VM_FAULT_SIGBUS 0x0002
#define VM_FAULT_MAJOR 0x0004
@@ -1113,6 +1132,8 @@ struct zap_details {
struct address_space *check_mapping; /* Check page->mapping if set */
pgoff_t first_index; /* Lowest page->index to unmap */
pgoff_t last_index; /* Highest page->index to unmap */
+ bool ignore_dirty; /* Ignore dirty pages */
+ bool check_swap_entries; /* Check also swap entries */
};
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -1225,24 +1246,82 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int foll_flags, struct page **pages,
struct vm_area_struct **vmas, int *nonblocking);
-long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- struct vm_area_struct **vmas);
-long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- int *locked);
+long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ struct vm_area_struct **vmas);
+long get_user_pages6(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ struct vm_area_struct **vmas);
+long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
unsigned int gup_flags);
-long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
+/* suppress warnings from use in EXPORT_SYMBOL() */
+#ifndef __DISABLE_GUP_DEPRECATED
+#define __gup_deprecated __deprecated
+#else
+#define __gup_deprecated
+#endif
+/*
+ * These macros provide backward compatibility with the old
+ * get_user_pages() variants which took tsk/mm. The replacement
+ * functions/macros are marked compile-time __deprecated so we
+ * can catch old-style use without breaking the build. The actual
+ * functions also have WARN_ON()s to let us know at runtime if
+ * the get_user_pages() should have been the "remote" variant.
+ *
+ * These are hideous, but temporary.
+ *
+ * If you run into one of these __deprecated warnings, look
+ * at how you are calling get_user_pages(). If you are calling
+ * it with current/current->mm as the first two arguments,
+ * simply remove those arguments. The behavior will be the same
+ * as it is now. If you are calling it on another task, use
+ * get_user_pages_remote() instead.
+ *
+ * Any questions? Ask Dave Hansen <dave@sr71.net>
+ */
+long
+__gup_deprecated
+get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ struct vm_area_struct **vmas);
+#define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...) \
+ get_user_pages
+#define get_user_pages(...) GUP_MACRO(__VA_ARGS__, \
+ get_user_pages8, x, \
+ get_user_pages6, x, x, x, x, x)(__VA_ARGS__)
+
+__gup_deprecated
+long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ int *locked);
+#define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...) \
+ get_user_pages_locked
+#define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__, \
+ get_user_pages_locked8, x, \
+ get_user_pages_locked6, x, x, x, x)(__VA_ARGS__)
+
+__gup_deprecated
+long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages);
+#define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...) \
+ get_user_pages_unlocked
+#define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__, \
+ get_user_pages_unlocked7, x, \
+ get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__)
+
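In practice the conversion looks like this at a call site (illustrative only, variable declarations omitted):

/* Old style (now __deprecated): */
got = get_user_pages(current, current->mm, start, nr_pages,
		     write, force, pages, vmas);

/* New style for the current task - just drop the first two arguments: */
got = get_user_pages(start, nr_pages, write, force, pages, vmas);

/* Operating on another task's mm: */
got = get_user_pages_remote(tsk, mm, start, nr_pages,
			    write, force, pages, vmas);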
/* Container for pinned pfns / pages */
struct frame_vector {
unsigned int nr_allocated; /* Number of frames we have space for */
@@ -1299,10 +1378,9 @@ int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping,
- struct mem_cgroup *memcg);
+void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
- struct mem_cgroup *memcg, struct bdi_writeback *wb);
+ struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void cancel_dirty_page(struct page *page);
@@ -1532,8 +1610,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm)
}
#endif
-int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
/*
@@ -1659,15 +1736,15 @@ static inline void pgtable_page_dtor(struct page *page)
pte_unmap(pte); \
} while (0)
-#define pte_alloc_map(mm, vma, pmd, address) \
- ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \
- pmd, address))? \
- NULL: pte_offset_map(pmd, address))
+#define pte_alloc(mm, pmd, address) \
+ (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
+
+#define pte_alloc_map(mm, pmd, address) \
+ (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
- ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \
- pmd, address))? \
- NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+ (pte_alloc(mm, pmd, address) ? \
+ NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
#define pte_alloc_kernel(pmd, address) \
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
@@ -1862,6 +1939,7 @@ extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
+extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
@@ -1876,6 +1954,7 @@ extern void zone_pcp_reset(struct zone *zone);
/* page_alloc.c */
extern int min_free_kbytes;
+extern int watermark_scale_factor;
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
@@ -2138,6 +2217,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
@@ -2168,6 +2249,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_MLOCK 0x1000 /* lock present pages */
+#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
@@ -2175,6 +2257,17 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
+#ifdef CONFIG_PAGE_POISONING
+extern bool page_poisoning_enabled(void);
+extern void kernel_poison_pages(struct page *page, int numpages, int enable);
+extern bool page_is_poisoned(struct page *page);
+#else
+static inline bool page_poisoning_enabled(void) { return false; }
+static inline void kernel_poison_pages(struct page *page, int numpages,
+ int enable) { }
+static inline bool page_is_poisoned(struct page *page) { return false; }
+#endif
+
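The allocator is expected to use these hooks symmetrically: poison on free, unpoison (and verify) on allocation, with everything compiling to no-ops when CONFIG_PAGE_POISONING is off. A rough sketch of that pattern (illustrative helper names):

static void poison_on_free(struct page *page, unsigned int order)
{
	if (page_poisoning_enabled())
		kernel_poison_pages(page, 1 << order, 0);	/* poison   */
}

static void unpoison_on_alloc(struct page *page, unsigned int order)
{
	if (page_poisoning_enabled())
		kernel_poison_pages(page, 1 << order, 1);	/* unpoison */
}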
#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
@@ -2194,14 +2287,18 @@ kernel_map_pages(struct page *page, int numpages, int enable)
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
-#endif /* CONFIG_HIBERNATION */
-#else
+#endif /* CONFIG_HIBERNATION */
+#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
-#endif /* CONFIG_HIBERNATION */
-#endif
+#endif /* CONFIG_HIBERNATION */
+static inline bool debug_pagealloc_enabled(void)
+{
+ return false;
+}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 624b78b848b8..944b2b37313b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -566,10 +566,26 @@ static inline void clear_tlb_flush_pending(struct mm_struct *mm)
}
#endif
-struct vm_special_mapping
-{
- const char *name;
+struct vm_fault;
+
+struct vm_special_mapping {
+ const char *name; /* The name, e.g. "[vdso]". */
+
+ /*
+ * If .fault is not provided, this points to a
+ * NULL-terminated array of pages that back the special mapping.
+ *
+ * This must not be NULL unless .fault is provided.
+ */
struct page **pages;
+
+ /*
+ * If non-NULL, then this is called to resolve page faults
+ * on the special mapping. If used, .pages is not checked.
+ */
+ int (*fault)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf);
};
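A user of the new hook supplies a name plus a fault callback instead of a static page array; a minimal illustrative sketch (the callback below just refuses access):

static int example_special_fault(const struct vm_special_mapping *sm,
				 struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	/* Resolve the fault here (e.g. install a page); this stub just fails. */
	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping example_mapping = {
	.name  = "[example]",
	.fault = example_special_fault,
};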
enum tlb_flush_reason {
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 16373c8f5f57..33e17f6a327a 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -35,7 +35,7 @@ static inline void vm_unacct_memory(long pages)
*/
#ifndef arch_calc_vm_prot_bits
-#define arch_calc_vm_prot_bits(prot) 0
+#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif
#ifndef arch_vm_get_page_prot
@@ -70,12 +70,12 @@ static inline int arch_validate_prot(unsigned long prot)
* Combine the mmap "prot" argument into "vm_flags" used internally.
*/
static inline unsigned long
-calc_vm_prot_bits(unsigned long prot)
+calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
_calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
_calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
- arch_calc_vm_prot_bits(prot);
+ arch_calc_vm_prot_bits(prot, pkey);
}
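For example, calc_vm_prot_bits(PROT_READ | PROT_WRITE, 1) yields VM_READ | VM_WRITE, plus whatever arch_calc_vm_prot_bits() adds for protection key 1 - on x86 with memory protection keys that is the pkey encoded in the VM_PKEY_BIT0-3 flags defined in mm.h above, and 0 everywhere else.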
/*
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 37967b6da03c..b01e77de1a74 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -113,7 +113,6 @@ struct mmc_data {
#define MMC_DATA_WRITE (1 << 8)
#define MMC_DATA_READ (1 << 9)
-#define MMC_DATA_STREAM (1 << 10)
unsigned int bytes_xfered;
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 89df7abedd67..7b41c6db1bb6 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -235,21 +235,11 @@ struct dw_mci_dma_ops {
};
/* IP Quirks/flags. */
-/* Unreliable card detection */
-#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(0)
/* Timer for broken data transfer over scheme */
-#define DW_MCI_QUIRK_BROKEN_DTO BIT(1)
+#define DW_MCI_QUIRK_BROKEN_DTO BIT(0)
struct dma_pdata;
-struct block_settings {
- unsigned short max_segs; /* see blk_queue_max_segments */
- unsigned int max_blk_size; /* maximum size of one mmc block */
- unsigned int max_blk_count; /* maximum number of blocks in one req*/
- unsigned int max_req_size; /* maximum number of bytes in one req*/
- unsigned int max_seg_size; /* see blk_queue_max_segment_size */
-};
-
/* Board platform data */
struct dw_mci_board {
u32 num_slots;
diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h
index 84d9053b5dca..5f5cd80e9765 100644
--- a/include/linux/mmc/tmio.h
+++ b/include/linux/mmc/tmio.h
@@ -1,6 +1,8 @@
/*
* include/linux/mmc/tmio.h
*
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
*
@@ -61,6 +63,9 @@
#define TMIO_STAT_CMD_BUSY 0x40000000
#define TMIO_STAT_ILL_ACCESS 0x80000000
+#define CLK_CTL_DIV_MASK 0xff
+#define CLK_CTL_SCLKEN BIT(8)
+
#define TMIO_BBS 512 /* Boot block size */
#endif /* LINUX_MMC_TMIO_H */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 053824b0a412..de7be78c6f0e 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -9,8 +9,7 @@ struct vm_area_struct;
struct mm_struct;
extern void dump_page(struct page *page, const char *reason);
-extern void dump_page_badflags(struct page *page, const char *reason,
- unsigned long badflags);
+extern void __dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7b6c2cfee390..c60df9257cc7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,6 +63,9 @@ enum {
MIGRATE_TYPES
};
+/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
+extern char * const migratetype_names[MIGRATE_TYPES];
+
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#else
@@ -209,10 +212,12 @@ struct zone_reclaim_stat {
};
struct lruvec {
- struct list_head lists[NR_LRU_LISTS];
- struct zone_reclaim_stat reclaim_stat;
+ struct list_head lists[NR_LRU_LISTS];
+ struct zone_reclaim_stat reclaim_stat;
+ /* Evictions & activations on the inactive file list */
+ atomic_long_t inactive_age;
#ifdef CONFIG_MEMCG
- struct zone *zone;
+ struct zone *zone;
#endif
};
@@ -487,9 +492,6 @@ struct zone {
spinlock_t lru_lock;
struct lruvec lruvec;
- /* Evictions & activations on the inactive file list */
- atomic_long_t inactive_age;
-
/*
* When free pages are below this point, additional steps are taken
* when reading the number of free pages to avoid per-cpu counter
@@ -520,6 +522,8 @@ struct zone {
bool compact_blockskip_flush;
#endif
+ bool contiguous;
+
ZONE_PADDING(_pad3_)
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
@@ -664,6 +668,12 @@ typedef struct pglist_data {
mem_hotplug_begin/end() */
int kswapd_max_order;
enum zone_type classzone_idx;
+#ifdef CONFIG_COMPACTION
+ int kcompactd_max_order;
+ enum zone_type kcompactd_classzone_idx;
+ wait_queue_head_t kcompactd_wait;
+ struct task_struct *kcompactd;
+#endif
#ifdef CONFIG_NUMA_BALANCING
/* Lock serializing the migrate rate limiting window */
spinlock_t numabalancing_migrate_lock;
@@ -758,6 +768,8 @@ static inline struct zone *lruvec_zone(struct lruvec *lruvec)
#endif
}
+extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);
+
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
@@ -829,6 +841,8 @@ static inline int is_highmem(struct zone *zone)
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index a2a0068a8387..8b425c66305a 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -33,6 +33,14 @@ struct platform_msi_desc {
};
/**
+ * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
+ * @msi_index: The index of the MSI descriptor
+ */
+struct fsl_mc_msi_desc {
+ u16 msi_index;
+};
+
+/**
* struct msi_desc - Descriptor structure for MSI based interrupts
* @list: List head for management
* @irq: The base interrupt number
@@ -87,6 +95,7 @@ struct msi_desc {
* tree wide cleanup.
*/
struct platform_msi_desc platform;
+ struct fsl_mc_msi_desc fsl_mc;
};
};
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 36bb6a503f19..3bf8f954b642 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -166,7 +166,6 @@ struct bbm_info {
};
/* OneNAND BBT interface */
-extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int onenand_default_bbt(struct mtd_info *mtd);
#endif /* __LINUX_MTD_BBM_H */
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
index 02cd5f9b79b8..8255118be0f0 100644
--- a/include/linux/mtd/inftl.h
+++ b/include/linux/mtd/inftl.h
@@ -44,7 +44,6 @@ struct INFTLrecord {
unsigned int nb_blocks; /* number of physical blocks */
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
struct erase_info instr;
- struct nand_ecclayout oobinfo;
};
int INFTL_mount(struct INFTLrecord *s);
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 58f3ba709ade..5e0eb7ccabd4 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -240,8 +240,11 @@ struct map_info {
If there is no cache to care about this can be set to NULL. */
void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
- /* set_vpp() must handle being reentered -- enable, enable, disable
- must leave it enabled. */
+ /* This will be called with 1 as parameter when the first map user
+ * needs VPP, and called with 0 when the last user exits. The map
+ * core maintains a reference counter, and assumes that VPP is a
+ * global resource applying to all mapped flash chips on the system.
+ */
void (*set_vpp)(struct map_info *, int);
unsigned long pfow_base;
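Under the new contract a board driver only switches the supply on and off; the map core's reference counting guarantees the calls stay balanced. An illustrative sketch with a hypothetical VPP GPIO:

#define BOARD_VPP_GPIO	42	/* hypothetical */

static void board_set_vpp(struct map_info *map, int on)
{
	/* Called with 1 by the first user needing VPP, 0 by the last one. */
	gpio_set_value(BOARD_VPP_GPIO, on);
}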
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cc84923011c0..771272187316 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -105,7 +105,6 @@ struct mtd_oob_ops {
struct nand_ecclayout {
__u32 eccbytes;
__u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
- __u32 oobavail;
struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
};
@@ -265,6 +264,11 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
return mtd->dev.of_node;
}
+static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+{
+ return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
+}
+
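Callers sizing OOB transfers can use the helper instead of open-coding the mode check; an illustrative sketch of writing a full OOB area:

static int write_full_oob(struct mtd_info *mtd, loff_t to, u8 *oobbuf)
{
	struct mtd_oob_ops ops = { };

	ops.mode   = MTD_OPS_AUTO_OOB;
	ops.oobbuf = oobbuf;
	/* oobavail for AUTO_OOB, the full oobsize for PLACE/RAW modes */
	ops.ooblen = mtd_oobavail(mtd, &ops);

	return mtd_write_oob(mtd, to, &ops);
}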
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
void **virt, resource_size_t *phys);
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index bdd68e22b5a5..56574ba36555 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -168,6 +168,12 @@ typedef enum {
/* Device supports subpage reads */
#define NAND_SUBPAGE_READ 0x00001000
+/*
+ * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
+ * patterns.
+ */
+#define NAND_NEED_SCRAMBLING 0x00002000
+
/* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
@@ -666,7 +672,7 @@ struct nand_chip {
void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
void (*select_chip)(struct mtd_info *mtd, int chip);
- int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
+ int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
int (*dev_ready)(struct mtd_info *mtd);
@@ -896,7 +902,6 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
* @chip_delay: R/B delay value in us
* @options: Option flags, e.g. 16bit buswidth
* @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
- * @ecclayout: ECC layout info structure
* @part_probe_types: NULL-terminated array of probe types
*/
struct platform_nand_chip {
@@ -904,7 +909,6 @@ struct platform_nand_chip {
int chip_offset;
int nr_partitions;
struct mtd_partition *partitions;
- struct nand_ecclayout *ecclayout;
int chip_delay;
unsigned int options;
unsigned int bbt_options;
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
index fb0bc3420a10..98f20ef05d60 100644
--- a/include/linux/mtd/nand_bch.h
+++ b/include/linux/mtd/nand_bch.h
@@ -32,9 +32,7 @@ int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc,
/*
* Initialize BCH encoder/decoder
*/
-struct nand_bch_control *
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
- unsigned int eccbytes, struct nand_ecclayout **ecclayout);
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
/*
* Release BCH encoder/decoder resources
*/
@@ -58,9 +56,7 @@ nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
return -ENOTSUPP;
}
-static inline struct nand_bch_control *
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
- unsigned int eccbytes, struct nand_ecclayout **ecclayout)
+static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
{
return NULL;
}
diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h
index b059629e22bc..044daa02b8ff 100644
--- a/include/linux/mtd/nftl.h
+++ b/include/linux/mtd/nftl.h
@@ -50,7 +50,6 @@ struct NFTLrecord {
unsigned int nb_blocks; /* number of physical blocks */
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
struct erase_info instr;
- struct nand_ecclayout oobinfo;
};
int NFTL_mount(struct NFTLrecord *s);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 62356d50815b..3c36113a88e1 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -85,6 +85,7 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_TB BIT(5) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
@@ -116,6 +117,7 @@ enum spi_nor_ops {
enum spi_nor_option_flags {
SNOR_F_USE_FSR = BIT(0),
+ SNOR_F_HAS_SR_TB = BIT(1),
};
/**
diff --git a/include/linux/namei.h b/include/linux/namei.h
index d0f25d81b46a..77d01700daf7 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -31,6 +31,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_PARENT 0x0010
#define LOOKUP_REVAL 0x0020
#define LOOKUP_RCU 0x0040
+#define LOOKUP_NO_REVAL 0x0080
/*
* Intent data
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 507e47c86737..5489ab756d1a 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -16,11 +16,16 @@
#include <linux/ndctl.h>
#include <linux/device.h>
+enum nvdimm_event {
+ NVDIMM_REVALIDATE_POISON,
+};
+
struct nd_device_driver {
struct device_driver drv;
unsigned long type;
int (*probe)(struct device *dev);
int (*remove)(struct device *dev);
+ void (*notify)(struct device *dev, enum nvdimm_event event);
};
static inline struct nd_device_driver *to_nd_device_driver(
@@ -144,6 +149,8 @@ static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
MODULE_ALIAS("nd:t" __stringify(type) "*")
#define ND_DEVICE_MODALIAS_FMT "nd:t%d"
+struct nd_region;
+void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
struct module *module, const char *mod_name);
#define nd_driver_register(driver) \
diff --git a/include/linux/net.h b/include/linux/net.h
index 0b4ac7da583a..49175e4ced11 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -215,6 +215,7 @@ int __sock_create(struct net *net, int family, int type, int proto,
int sock_create(int family, int type, int proto, struct socket **res);
int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
int sock_create_lite(int family, int type, int proto, struct socket **res);
+struct socket *sock_alloc(void);
void sock_release(struct socket *sock);
int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index d9654f0eecb3..a734bf43d190 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -67,6 +67,8 @@ enum {
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
NETIF_F_BUSY_POLL_BIT, /* Busy poll */
+ NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
+
/*
* Add your fresh new feature above and remember to update
* netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -124,6 +126,7 @@ enum {
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
+#define NETIF_F_HW_TC __NETIF_F(HW_TC)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5440b7b705eb..cb0d5d09c2e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -51,6 +51,7 @@
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
+#include <uapi/linux/pkt_cls.h>
struct netpoll_info;
struct device;
@@ -80,8 +81,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
* function. Real network devices commonly used with qdiscs should only return
* the driver transmit return codes though - when qdiscs are used, the actual
* transmission happens asynchronously, so the value is not propagated to
- * higher layers. Virtual network devices transmit synchronously, in this case
- * the driver transmit return codes are consumed by dev_queue_xmit(), all
+ * higher layers. Virtual network devices transmit synchronously; in this case
+ * the driver transmit return codes are consumed by dev_queue_xmit(), and all
* others are propagated to higher layers.
*/
@@ -128,7 +129,7 @@ static inline bool dev_xmit_complete(int rc)
}
/*
- * Compute the worst case header length according to the protocols
+ * Compute the worst-case header length according to the protocols
* used.
*/
@@ -245,7 +246,7 @@ struct hh_cache {
unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
-/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
+/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
* Alternative is:
* dev->hard_header_len ? (dev->hard_header_len +
* (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
@@ -267,10 +268,11 @@ struct header_ops {
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
+ bool (*validate)(const char *ll_header, unsigned int len);
};
/* These flag bits are private to the generic network queueing
- * layer, they may not be explicitly referenced by any other
+ * layer; they may not be explicitly referenced by any other
* code.
*/
@@ -284,7 +286,7 @@ enum netdev_state_t {
/*
- * This structure holds at boot time configured netdevice settings. They
+ * This structure holds boot-time configured netdevice settings. They
* are then used in the device probing.
*/
struct netdev_boot_setup {
@@ -302,7 +304,7 @@ struct napi_struct {
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
- * to the per-cpu poll_list, and whoever clears that bit
+ * to the per-CPU poll_list, and whoever clears that bit
* can remove from the list right before clearing the bit.
*/
struct list_head poll_list;
@@ -348,7 +350,7 @@ typedef enum gro_result gro_result_t;
* @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
* case skb->dev was changed by rx_handler.
* @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
- * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called.
+ * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
*
* rx_handlers are functions called from inside __netif_receive_skb(), to do
* special processing of the skb, prior to delivery to protocol handlers.
@@ -363,19 +365,19 @@ typedef enum gro_result gro_result_t;
* Upon return, rx_handler is expected to tell __netif_receive_skb() what to
* do with the skb.
*
- * If the rx_handler consumed to skb in some way, it should return
+ * If the rx_handler consumed the skb in some way, it should return
* RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
- * the skb to be delivered in some other ways.
+ * the skb to be delivered in some other way.
*
* If the rx_handler changed skb->dev, to divert the skb to another
* net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
* new device will be called if it exists.
*
- * If the rx_handler consider the skb should be ignored, it should return
+ * If the rx_handler decides the skb should be ignored, it should return
* RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
* are registered on exact device (ptype->dev == skb->dev).
*
- * If the rx_handler didn't changed skb->dev, but want the skb to be normally
+ * If the rx_handler didn't change skb->dev, but wants the skb to be normally
* delivered, it should return RX_HANDLER_PASS.
*
* A device without a registered rx_handler will behave as if rx_handler
@@ -400,11 +402,11 @@ static inline bool napi_disable_pending(struct napi_struct *n)
}
/**
- * napi_schedule_prep - check if napi can be scheduled
- * @n: napi context
+ * napi_schedule_prep - check if NAPI can be scheduled
+ * @n: NAPI context
*
* Test if NAPI routine is already running, and if not mark
- * it as running. This is used as a condition variable
+ * it as running. This is used as a condition variable to
* insure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
@@ -416,7 +418,7 @@ static inline bool napi_schedule_prep(struct napi_struct *n)
/**
* napi_schedule - schedule NAPI poll
- * @n: napi context
+ * @n: NAPI context
*
* Schedule NAPI poll routine to be called if it is not already
* running.
@@ -429,7 +431,7 @@ static inline void napi_schedule(struct napi_struct *n)
/**
* napi_schedule_irqoff - schedule NAPI poll
- * @n: napi context
+ * @n: NAPI context
*
* Variant of napi_schedule(), assuming hard irqs are masked.
*/
@@ -453,7 +455,7 @@ void __napi_complete(struct napi_struct *n);
void napi_complete_done(struct napi_struct *n, int work_done);
/**
* napi_complete - NAPI processing complete
- * @n: napi context
+ * @n: NAPI context
*
* Mark NAPI processing as complete.
* Consider using napi_complete_done() instead.
@@ -465,32 +467,32 @@ static inline void napi_complete(struct napi_struct *n)
/**
* napi_hash_add - add a NAPI to global hashtable
- * @napi: napi context
+ * @napi: NAPI context
*
- * generate a new napi_id and store a @napi under it in napi_hash
- * Used for busy polling (CONFIG_NET_RX_BUSY_POLL)
+ * Generate a new napi_id and store a @napi under it in napi_hash.
+ * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
* Note: This is normally automatically done from netif_napi_add(),
- * so might disappear in a future linux version.
+ * so might disappear in a future Linux version.
*/
void napi_hash_add(struct napi_struct *napi);
/**
* napi_hash_del - remove a NAPI from global table
- * @napi: napi context
+ * @napi: NAPI context
*
- * Warning: caller must observe rcu grace period
+ * Warning: caller must observe RCU grace period
* before freeing memory containing @napi, if
* this function returns true.
* Note: core networking stack automatically calls it
- * from netif_napi_del()
+ * from netif_napi_del().
* Drivers might want to call this helper to combine all
- * the needed rcu grace periods into a single one.
+ * the needed RCU grace periods into a single one.
*/
bool napi_hash_del(struct napi_struct *napi);
/**
* napi_disable - prevent NAPI from scheduling
- * @n: napi context
+ * @n: NAPI context
*
* Stop NAPI from being scheduled on this context.
* Waits till any outstanding processing completes.
@@ -499,7 +501,7 @@ void napi_disable(struct napi_struct *n);
/**
* napi_enable - enable NAPI scheduling
- * @n: napi context
+ * @n: NAPI context
*
* Resume NAPI from being scheduled on this context.
* Must be paired with napi_disable.
@@ -514,7 +516,7 @@ static inline void napi_enable(struct napi_struct *n)
/**
* napi_synchronize - wait until NAPI is not running
- * @n: napi context
+ * @n: NAPI context
*
* Wait until NAPI is done being scheduled on this context.
* Waits till any outstanding processing completes but
@@ -557,7 +559,7 @@ enum netdev_queue_state_t {
struct netdev_queue {
/*
- * read mostly part
+ * read-mostly part
*/
struct net_device *dev;
struct Qdisc __rcu *qdisc;
@@ -569,7 +571,7 @@ struct netdev_queue {
int numa_node;
#endif
/*
- * write mostly part
+ * write-mostly part
*/
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
int xmit_lock_owner;
@@ -646,11 +648,11 @@ struct rps_dev_flow_table {
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
* on which they were processed by the application (set in recvmsg).
- * Each entry is a 32bit value. Upper part is the high order bits
- * of flow hash, lower part is cpu number.
+ * Each entry is a 32bit value. Upper part is the high-order bits
+ * of flow hash, lower part is CPU number.
* rps_cpu_mask is used to partition the space, depending on number of
- * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
- * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f,
+ * possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
* meaning we use 32-6=26 bits for the hash.
*/
struct rps_sock_flow_table {
@@ -672,7 +674,7 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
unsigned int index = hash & table->mask;
u32 val = hash & ~rps_cpu_mask;
- /* We only give a hint, preemption can change cpu under us */
+ /* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
if (table->ents[index] != val)
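As a worked illustration of the packing described above, here is a standalone, user-space sketch assuming 64 possible CPUs (so rps_cpu_mask = 0x3f); this is not kernel code, only a demonstration of the bit layout.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rps_cpu_mask = 0x3f;   /* roundup_pow_of_two(64) - 1 */
	uint32_t hash = 0xdeadbeef;     /* flow hash computed by the stack/NIC */
	uint32_t cpu = 17;              /* CPU that last ran recvmsg() for the flow */

	/* upper 26 bits keep the hash, lower 6 bits carry the CPU number */
	uint32_t val = (hash & ~rps_cpu_mask) | cpu;

	printf("entry = 0x%08x (hash bits 0x%08x, cpu %u)\n",
	       val, val & ~rps_cpu_mask, val & rps_cpu_mask);
	return 0;
}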
@@ -778,27 +780,48 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
+/* These structures hold the attributes of qdiscs and classifiers
+ * that are passed to the netdevice through the setup_tc op.
+ */
+enum {
+ TC_SETUP_MQPRIO,
+ TC_SETUP_CLSU32,
+ TC_SETUP_CLSFLOWER,
+};
+
+struct tc_cls_u32_offload;
+
+struct tc_to_netdev {
+ unsigned int type;
+ union {
+ u8 tc;
+ struct tc_cls_u32_offload *cls_u32;
+ struct tc_cls_flower_offload *cls_flower;
+ };
+};
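A hedged sketch of how a driver might consume this union in its setup_tc hook; the foo_* helpers are hypothetical, and only the types and the new ndo_setup_tc signature come from this header.

static int foo_setup_tc(struct net_device *dev, u32 handle,
			__be16 protocol, struct tc_to_netdev *tc)
{
	switch (tc->type) {
	case TC_SETUP_MQPRIO:
		/* tc->tc carries the requested number of traffic classes */
		return foo_setup_mqprio(dev, tc->tc);
	case TC_SETUP_CLSU32:
		return foo_offload_cls_u32(dev, handle, protocol, tc->cls_u32);
	default:
		return -EOPNOTSUPP;
	}
}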
+
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
* optional and can be filled with a null pointer.
*
* int (*ndo_init)(struct net_device *dev);
- * This function is called once when network device is registered.
- * The network device can use this to any late stage initializaton
- * or semantic validattion. It can fail with an error code which will
- * be propogated back to register_netdev
+ * This function is called once when a network device is registered.
+ * The network device can use this for any late stage initialization
+ * or semantic validation. It can fail with an error code which will
+ * be propagated back to register_netdev.
*
* void (*ndo_uninit)(struct net_device *dev);
* This function is called when device is unregistered or when registration
* fails. It is not called if init fails.
*
* int (*ndo_open)(struct net_device *dev);
- * This function is called when network device transistions to the up
+ * This function is called when a network device transitions to the up
* state.
*
* int (*ndo_stop)(struct net_device *dev);
- * This function is called when network device transistions to the down
+ * This function is called when a network device transitions to the down
* state.
*
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
@@ -809,7 +832,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* corner cases, but the stack really does a non-trivial amount
* of useless work if you return NETDEV_TX_BUSY.
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
- * Required can not be NULL.
+ * Required; cannot be NULL.
*
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
@@ -819,34 +842,34 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
- * Called to decide which queue to when device supports multiple
+ * Called to decide which queue to use when device supports multiple
* transmit queues.
*
* void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
* This function is called to allow device receiver to make
- * changes to configuration when multicast or promiscious is enabled.
+ * changes to configuration when multicast or promiscuous is enabled.
*
* void (*ndo_set_rx_mode)(struct net_device *dev);
 * This function is called when the device changes address list filtering.
* If driver handles unicast address filtering, it should set
- * IFF_UNICAST_FLT to its priv_flags.
+ * IFF_UNICAST_FLT in its priv_flags.
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
* needs to be changed. If this interface is not defined, the
- * mac address can not be changed.
+ * MAC address cannot be changed.
*
* int (*ndo_validate_addr)(struct net_device *dev);
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
- * Called when a user request an ioctl which can't be handled by
- * the generic interface code. If not defined ioctl's return
+ * Called when a user requests an ioctl which can't be handled by
+ * the generic interface code. If not defined, ioctls return a
* not supported error code.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 * Used to set the network device's bus interface parameters. This interface
- * is retained for legacy reason, new devices should use the bus
+ * is retained for legacy reasons; new devices should use the bus
* interface (PCI) for low level management.
*
* int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
@@ -855,7 +878,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* will return an error.
*
* void (*ndo_tx_timeout)(struct net_device *dev);
- * Callback uses when the transmitter has not made any progress
+ * Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
* struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
@@ -873,11 +896,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* neither operation.
*
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
- * If device support VLAN filtering this function is called when a
+ * If the device supports VLAN filtering, this function is called when a
* VLAN id is registered.
*
* int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
- * If device support VLAN filtering this function is called when a
+ * If the device supports VLAN filtering, this function is called when a
* VLAN id is unregistered.
*
* void (*ndo_poll_controller)(struct net_device *dev);
@@ -897,7 +920,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
*
* Enable or disable the VF ability to query its RSS Redirection Table and
 * Hash Key. This is needed since on some devices VFs share this information
- * with PF and querying it may adduce a theoretical security risk.
+ * with the PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
* int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
@@ -1007,20 +1030,20 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
*
* void (*ndo_add_vxlan_port)(struct net_device *dev,
* sa_family_t sa_family, __be16 port);
- * Called by vxlan to notiy a driver about the UDP port and socket
- * address family that vxlan is listnening to. It is called only when
+ * Called by vxlan to notify a driver about the UDP port and socket
+ * address family that vxlan is listening to. It is called only when
* a new port starts listening. The operation is protected by the
* vxlan_net->sock_lock.
*
* void (*ndo_add_geneve_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by geneve to notify a driver about the UDP port and socket
 * address family that geneve is listening to. It is called only when
* a new port starts listening. The operation is protected by the
* geneve_net->sock_lock.
*
* void (*ndo_del_geneve_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by geneve to notify the driver about a UDP port and socket
* address family that geneve is not listening to anymore. The operation
* is protected by the geneve_net->sock_lock.
@@ -1049,9 +1072,9 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
- * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
- * struct net_device *dev
- * netdev_features_t features);
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ * struct net_device *dev
+ * netdev_features_t features);
* Called by core transmit path to determine if device is capable of
* performing offload operations on a given packet. This is to give
* the device an opportunity to implement any restrictions that cannot
@@ -1065,7 +1088,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
* void (*ndo_change_proto_down)(struct net_device *dev,
- * bool proto_down);
+ * bool proto_down);
* This function is used to pass protocol port error state information
* to the switch driver. The switch driver can react to the proto_down
* by doing a phys down on the associated switch port.
@@ -1073,6 +1096,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* This function is used to get egress tunnel information for given skb.
* This is useful for retrieving outer tunnel header parameters while
* sampling packet.
+ * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
+ * This function is used to specify the headroom that must be
+ * reserved when an skb is allocated during packet reception.
+ * Setting an appropriate rx headroom value avoids an skb head
+ * copy on forward. Setting a negative value resets the rx
+ * headroom to the default value.
*
*/
struct net_device_ops {
@@ -1147,10 +1176,16 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_set_vf_guid)(struct net_device *dev,
+ int vf, u64 guid,
+ int guid_type);
int (*ndo_set_vf_rss_query_en)(
struct net_device *dev,
int vf, bool setting);
- int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
+ int (*ndo_setup_tc)(struct net_device *dev,
+ u32 handle,
+ __be16 protocol,
+ struct tc_to_netdev *tc);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -1255,6 +1290,8 @@ struct net_device_ops {
bool proto_down);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
+ void (*ndo_set_rx_headroom)(struct net_device *dev,
+ int needed_headroom);
};
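For orientation, a minimal (hypothetical) ops table wiring a few of the hooks documented above; foo_open/foo_stop/foo_xmit are assumptions, while eth_validate_addr and eth_mac_addr are the usual generic Ethernet helpers.

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,		/* device transitions to up */
	.ndo_stop		= foo_stop,		/* device transitions to down */
	.ndo_start_xmit		= foo_xmit,		/* required; cannot be NULL */
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};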
/**
@@ -1262,7 +1299,7 @@ struct net_device_ops {
*
* These are the &struct net_device, they are only set internally
* by drivers and used in the kernel. These flags are invisible to
- * userspace, this means that the order of these flags can change
+ * userspace; this means that the order of these flags can change
* during any kernel release.
*
* You should have a pretty good reason to be extending these flags.
@@ -1286,11 +1323,19 @@ struct net_device_ops {
* @IFF_LIVE_ADDR_CHANGE: device supports hardware address
* change when it's running
* @IFF_MACVLAN: Macvlan device
+ * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
+ * underlying stacked devices
+ * @IFF_IPVLAN_MASTER: IPvlan master device
+ * @IFF_IPVLAN_SLAVE: IPvlan slave device
* @IFF_L3MDEV_MASTER: device is an L3 master device
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
* @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
* @IFF_TEAM: device is a team device
+ * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
+ * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
+ * entity (i.e. the master device for bridged veth)
+ * @IFF_MACSEC: device is a MACsec device
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1318,6 +1363,9 @@ enum netdev_priv_flags {
IFF_OPENVSWITCH = 1<<22,
IFF_L3MDEV_SLAVE = 1<<23,
IFF_TEAM = 1<<24,
+ IFF_RXFH_CONFIGURED = 1<<25,
+ IFF_PHONY_HEADROOM = 1<<26,
+ IFF_MACSEC = 1<<27,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1345,6 +1393,8 @@ enum netdev_priv_flags {
#define IFF_OPENVSWITCH IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
#define IFF_TEAM IFF_TEAM
+#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
+#define IFF_MACSEC IFF_MACSEC
/**
* struct net_device - The DEVICE structure.
@@ -1367,10 +1417,12 @@ enum netdev_priv_flags {
*
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
- * @napi_list: List entry, that is used for polling napi devices
- * @unreg_list: List entry, that is used, when we are unregistering the
- * device, see the function unregister_netdev
- * @close_list: List entry, that is used, when we are closing the device
+ * @napi_list: List entry used for polling NAPI devices
+ * @unreg_list: List entry when we are unregistering the
+ * device; see the function unregister_netdev
+ * @close_list: List entry used when we are closing the device
+ * @ptype_all: Device-specific packet handlers for all protocols
+ * @ptype_specific: Device-specific, protocol-specific packet handlers
*
* @adj_list: Directly linked devices, like slaves for bonding
* @all_adj_list: All linked devices, *including* neighbours
@@ -1388,7 +1440,7 @@ enum netdev_priv_flags {
* @mpls_features: Mask of features inheritable by MPLS
*
* @ifindex: interface index
- * @group: The group, that the device belongs to
+ * @group: The group the device belongs to
*
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
@@ -1397,6 +1449,8 @@ enum netdev_priv_flags {
* do not use this in drivers
* @tx_dropped: Dropped packets by core network,
* do not use this in drivers
+ * @rx_nohandler: Packets dropped by the core network on inactive
+ * devices because no handler was found; do not use this in drivers
*
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
@@ -1420,8 +1474,7 @@ enum netdev_priv_flags {
* @dma: DMA channel
* @mtu: Interface MTU value
* @type: Interface hardware type
- * @hard_header_len: Hardware header length, which means that this is the
- * minimum size of a packet.
+ * @hard_header_len: Maximum hardware header length.
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
@@ -1441,7 +1494,7 @@ enum netdev_priv_flags {
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
- * @uc_promisc: Counter, that indicates, that promiscuous mode
+ * @uc_promisc: Counter that indicates promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
* does not implement ndo_set_rx_mode()
@@ -1449,9 +1502,9 @@ enum netdev_priv_flags {
* @mc: multicast mac addresses
* @dev_addrs: list of device hw addresses
* @queues_kset: Group of all Kobjects in the Tx and RX queues
- * @promiscuity: Number of times, the NIC is told to work in
- * Promiscuous mode, if it becomes 0 the NIC will
- * exit from working in Promiscuous mode
+ * @promiscuity: Number of times the NIC is told to work in
+ * promiscuous mode; if it becomes 0 the NIC will
+ * exit promiscuous mode
* @allmulti: Counter, enables or disables allmulticast mode
*
* @vlan_info: VLAN info
@@ -1497,7 +1550,7 @@ enum netdev_priv_flags {
*
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
- * the watchdog ( see dev_watchdog() )
+ * the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
*
* @pcpu_refcnt: Number of references to this device
@@ -1611,10 +1664,11 @@ struct net_device {
atomic_long_t rx_dropped;
atomic_long_t tx_dropped;
+ atomic_long_t rx_nohandler;
#ifdef CONFIG_WIRELESS_EXT
- const struct iw_handler_def * wireless_handlers;
- struct iw_public_data * wireless_data;
+ const struct iw_handler_def *wireless_handlers;
+ struct iw_public_data *wireless_data;
#endif
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
@@ -1667,7 +1721,7 @@ struct net_device {
unsigned int allmulti;
- /* Protocol specific pointers */
+ /* Protocol-specific pointers */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
@@ -1697,13 +1751,11 @@ struct net_device {
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr;
-
#ifdef CONFIG_SYSFS
struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
unsigned int real_num_rx_queues;
-
#endif
unsigned long gro_flush_timeout;
@@ -1795,7 +1847,7 @@ struct net_device {
struct garp_port __rcu *garp_port;
struct mrp_port __rcu *mrp_port;
- struct device dev;
+ struct device dev;
const struct attribute_group *sysfs_groups[4];
const struct attribute_group *sysfs_rx_queue_group;
@@ -1810,9 +1862,9 @@ struct net_device {
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- u8 num_tc;
- struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
- u8 prio_tc_map[TC_BITMASK + 1];
+ u8 num_tc;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ u8 prio_tc_map[TC_BITMASK + 1];
#if IS_ENABLED(CONFIG_FCOE)
unsigned int fcoe_ddp_xid;
@@ -1820,9 +1872,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map __rcu *priomap;
#endif
- struct phy_device *phydev;
- struct lock_class_key *qdisc_tx_busylock;
- bool proto_down;
+ struct phy_device *phydev;
+ struct lock_class_key *qdisc_tx_busylock;
+ bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1908,6 +1960,26 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv);
+/* Returns the headroom that the master device needs to take into account
+ * when forwarding to this dev
+ */
+static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
+}
+
+static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
+{
+ if (dev->netdev_ops->ndo_set_rx_headroom)
+ dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
+}
+
+/* set the device rx headroom to the dev's default */
+static inline void netdev_reset_rx_headroom(struct net_device *dev)
+{
+ netdev_set_rx_headroom(dev, -1);
+}
+
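A hedged sketch of how an upper device might use these helpers when stacking and unstacking a lower device; the foo_* wrappers are hypothetical.

/* Ask the lower device to reserve the master's headroom on receive,
 * and restore the default when the pairing is torn down.
 */
static void foo_enslave(struct net_device *master, struct net_device *slave)
{
	netdev_set_rx_headroom(slave, master->needed_headroom);
}

static void foo_release(struct net_device *slave)
{
	netdev_reset_rx_headroom(slave);
}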
/*
* Net namespace inlines
*/
@@ -1950,7 +2022,7 @@ static inline void *netdev_priv(const struct net_device *dev)
/* Set the sysfs device type for the network logical device to allow
* fine-grained identification of different network device types. For
- * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc.
+ * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
@@ -1960,22 +2032,22 @@ static inline void *netdev_priv(const struct net_device *dev)
#define NAPI_POLL_WEIGHT 64
/**
- * netif_napi_add - initialize a napi context
+ * netif_napi_add - initialize a NAPI context
* @dev: network device
- * @napi: napi context
+ * @napi: NAPI context
* @poll: polling function
* @weight: default weight
*
- * netif_napi_add() must be used to initialize a napi context prior to calling
- * *any* of the other napi related functions.
+ * netif_napi_add() must be used to initialize a NAPI context prior to calling
+ * *any* of the other NAPI-related functions.
*/
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
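A typical shape for the poll callback passed here (an illustrative sketch; foo_clean_rx() and priv are assumptions).

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_clean_rx(napi, budget);	/* hypothetical RX cleanup */

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

/* registered once at probe time, e.g.:
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 */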
/**
- * netif_tx_napi_add - initialize a napi context
+ * netif_tx_napi_add - initialize a NAPI context
* @dev: network device
- * @napi: napi context
+ * @napi: NAPI context
* @poll: polling function
* @weight: default weight
*
@@ -1993,22 +2065,22 @@ static inline void netif_tx_napi_add(struct net_device *dev,
}
/**
- * netif_napi_del - remove a napi context
- * @napi: napi context
+ * netif_napi_del - remove a NAPI context
+ * @napi: NAPI context
*
- * netif_napi_del() removes a napi context from the network device napi list
+ * netif_napi_del() removes a NAPI context from the network device NAPI list
*/
void netif_napi_del(struct napi_struct *napi);
struct napi_gro_cb {
/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
- void *frag0;
+ void *frag0;
/* Length of frag0. */
unsigned int frag0_len;
/* This indicates where we are processing relative to skb->data. */
- int data_offset;
+ int data_offset;
/* This is non-zero if the packet cannot be merged with the new skb. */
u16 flush;
@@ -2031,8 +2103,8 @@ struct napi_gro_cb {
/* This is non-zero if the packet may be of the same flow. */
u8 same_flow:1;
- /* Used in udp_gro_receive */
- u8 udp_mark:1;
+ /* Used in tunnel GRO receive */
+ u8 encap_mark:1;
/* GRO checksum is valid */
u8 csum_valid:1;
@@ -2104,7 +2176,7 @@ struct udp_offload {
struct udp_offload_callbacks callbacks;
};
-/* often modified stats are per cpu, other are shared (netdev->stats) */
+/* often-modified stats are per-CPU, others are shared (netdev->stats) */
struct pcpu_sw_netstats {
u64 rx_packets;
u64 rx_bytes;
@@ -2201,7 +2273,7 @@ struct netdev_notifier_changeupper_info {
struct netdev_notifier_info info; /* must be first */
struct net_device *upper_dev; /* new upper dev */
bool master; /* is upper dev master */
- bool linking; /* is the nofication for link or unlink */
+ bool linking; /* is the notification for link or unlink */
void *upper_info; /* upper dev info */
};
@@ -2627,6 +2699,24 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+/* ll_header must have at least hard_header_len allocated */
+static inline bool dev_validate_header(const struct net_device *dev,
+ char *ll_header, int len)
+{
+ if (likely(len >= dev->hard_header_len))
+ return true;
+
+ if (capable(CAP_SYS_RAWIO)) {
+ memset(ll_header + len, 0, dev->hard_header_len - len);
+ return true;
+ }
+
+ if (dev->header_ops && dev->header_ops->validate)
+ return dev->header_ops->validate(ll_header, len);
+
+ return false;
+}
+
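A hedged sketch of the intended call site, an af_packet-style raw transmit path; everything except dev_validate_header() is illustrative.

static int foo_check_user_header(struct net_device *dev, struct sk_buff *skb)
{
	/* Reject user-supplied link-layer headers the device cannot accept. */
	if (!dev_validate_header(dev, skb->data, skb->len))
		return -EINVAL;

	return 0;
}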
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
@@ -2648,7 +2738,7 @@ extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */
/*
- * Incoming packets are placed on per-cpu queues
+ * Incoming packets are placed on per-CPU queues
*/
struct softnet_data {
struct list_head poll_list;
@@ -2818,7 +2908,7 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
* @dev_queue: pointer to transmit queue
*
* BQL enabled drivers might use this helper in their ndo_start_xmit(),
- * to give appropriate hint to the cpu.
+ * to give appropriate hint to the CPU.
*/
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
@@ -2832,7 +2922,7 @@ static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_que
* @dev_queue: pointer to transmit queue
*
* BQL enabled drivers might use this helper in their TX completion path,
- * to give appropriate hint to the cpu.
+ * to give appropriate hint to the CPU.
*/
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
@@ -2971,7 +3061,7 @@ static inline bool netif_running(const struct net_device *dev)
}
/*
- * Routines to manage the subqueues on a device. We only need start
+ * Routines to manage the subqueues on a device. We only need start,
* stop, and a check if it's stopped. All other device management is
* done at the overall netdevice level.
* Also test the device if we're multiqueue.
@@ -3255,7 +3345,6 @@ void netif_carrier_off(struct net_device *dev);
* in a "pending" state, waiting for some external event. For "on-
* demand" interfaces, this new state identifies the situation where the
* interface is waiting for events to place it in the up state.
- *
*/
static inline void netif_dormant_on(struct net_device *dev)
{
@@ -3590,7 +3679,7 @@ void dev_uc_init(struct net_device *dev);
*
* Add newly added addresses to the interface, and release
* addresses that have been deleted.
- **/
+ */
static inline int __dev_uc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
@@ -3606,7 +3695,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by dev_uc_sync().
- **/
+ */
static inline void __dev_uc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
@@ -3634,7 +3723,7 @@ void dev_mc_init(struct net_device *dev);
*
* Add newly added addresses to the interface, and release
* addresses that have been deleted.
- **/
+ */
static inline int __dev_mc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
@@ -3650,7 +3739,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by dev_mc_sync().
- **/
+ */
static inline void __dev_mc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
@@ -3741,7 +3830,7 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
-extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
+extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
int dev_get_nest_level(struct net_device *dev,
@@ -3965,6 +4054,11 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
skb->mac_len = mac_len;
}
+static inline bool netif_is_macsec(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_MACSEC;
+}
+
static inline bool netif_is_macvlan(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;
@@ -4045,6 +4139,11 @@ static inline bool netif_is_lag_port(const struct net_device *dev)
return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}
+static inline bool netif_is_rxfh_configured(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_RXFH_CONFIGURED;
+}
+
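For example, a driver reconfiguring its queues might preserve a user-supplied RSS indirection table; a hedged sketch where foo_write_default_rss() is an assumption.

static void foo_update_rss(struct net_device *dev)
{
	/* Only regenerate the default table if ethtool never configured one. */
	if (!netif_is_rxfh_configured(dev))
		foo_write_default_rss(dev);
}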
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 0ad556726181..9230f9aee896 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -141,22 +141,6 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-
-static inline bool nf_hook_list_active(struct list_head *hook_list,
- u_int8_t pf, unsigned int hook)
-{
- if (__builtin_constant_p(pf) &&
- __builtin_constant_p(hook))
- return static_key_false(&nf_hooks_needed[pf][hook]);
-
- return !list_empty(hook_list);
-}
-#else
-static inline bool nf_hook_list_active(struct list_head *hook_list,
- u_int8_t pf, unsigned int hook)
-{
- return !list_empty(hook_list);
-}
#endif
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
@@ -177,9 +161,18 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct net *, struct sock *, struct sk_buff *),
int thresh)
{
- struct list_head *hook_list = &net->nf.hooks[pf][hook];
+ struct list_head *hook_list;
+
+#ifdef HAVE_JUMP_LABEL
+ if (__builtin_constant_p(pf) &&
+ __builtin_constant_p(hook) &&
+ !static_key_false(&nf_hooks_needed[pf][hook]))
+ return 1;
+#endif
+
+ hook_list = &net->nf.hooks[pf][hook];
- if (nf_hook_list_active(hook_list, pf, hook)) {
+ if (!list_empty(hook_list)) {
struct nf_hook_state state;
nf_hook_state_init(&state, hook_list, hook, thresh,
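The HAVE_JUMP_LABEL fast path above relies on the standard static-key idiom; here it is in isolation as a generic, hedged sketch (the foo_* names are hypothetical, not netfilter code).

#include <linux/jump_label.h>

static struct static_key foo_hooks_needed = STATIC_KEY_INIT_FALSE;

static inline void foo_maybe_run_hooks(void)
{
	/* Compiles to a patched NOP until static_key_slow_inc() flips it,
	 * so the common "no hooks registered" case costs nothing.
	 */
	if (static_key_false(&foo_hooks_needed))
		foo_run_hooks();	/* hypothetical slow path */
}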
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index ba0d9789eb6e..1d82dd5e9a08 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -34,8 +34,6 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
int nfnetlink_has_listeners(struct net *net, unsigned int group);
-struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask);
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
unsigned int group, int echo, gfp_t flags);
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index c5577410c25d..80a305b85323 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -200,6 +200,9 @@ struct xt_table {
u_int8_t af; /* address/protocol family */
int priority; /* hook order */
+ /* called when table is needed in the given netns */
+ int (*table_init)(struct net *net);
+
/* A unique name... */
const char name[XT_TABLE_MAXNAMELEN];
};
@@ -408,8 +411,7 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
return cnt;
}
-struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
-void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
+struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
#ifdef CONFIG_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index 6f074db2f23d..029b95e8924e 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -48,10 +48,11 @@ struct arpt_error {
}
extern void *arpt_alloc_initial_table(const struct xt_table *);
-extern struct xt_table *arpt_register_table(struct net *net,
- const struct xt_table *table,
- const struct arpt_replace *repl);
-extern void arpt_unregister_table(struct xt_table *table);
+int arpt_register_table(struct net *net, const struct xt_table *table,
+ const struct arpt_replace *repl,
+ const struct nf_hook_ops *ops, struct xt_table **res);
+void arpt_unregister_table(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops);
extern unsigned int arpt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table);
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index aa598f942c01..7bfc5893ec31 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -24,10 +24,11 @@
extern void ipt_init(void) __init;
-extern struct xt_table *ipt_register_table(struct net *net,
- const struct xt_table *table,
- const struct ipt_replace *repl);
-extern void ipt_unregister_table(struct net *net, struct xt_table *table);
+int ipt_register_table(struct net *net, const struct xt_table *table,
+ const struct ipt_replace *repl,
+ const struct nf_hook_ops *ops, struct xt_table **res);
+void ipt_unregister_table(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops);
/* Standard entry. */
struct ipt_standard {
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 0f76e5c674f9..b21c392d6012 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -25,10 +25,11 @@
extern void ip6t_init(void) __init;
extern void *ip6t_alloc_initial_table(const struct xt_table *);
-extern struct xt_table *ip6t_register_table(struct net *net,
- const struct xt_table *table,
- const struct ip6t_replace *repl);
-extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
+int ip6t_register_table(struct net *net, const struct xt_table *table,
+ const struct ip6t_replace *repl,
+ const struct nf_hook_ops *ops, struct xt_table **res);
+void ip6t_unregister_table(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops);
extern unsigned int ip6t_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 0b41959aab9f..da14ab61f363 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -69,16 +69,6 @@ extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group)
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
- unsigned int ldiff, u32 dst_portid,
- gfp_t gfp_mask);
-static inline struct sk_buff *
-netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
- gfp_t gfp_mask)
-{
- return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
-}
-
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index d6f9b4e6006d..011433478a14 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -529,6 +529,7 @@ enum pnfs_layouttype {
LAYOUT_OSD2_OBJECTS = 2,
LAYOUT_BLOCK_VOLUME = 3,
LAYOUT_FLEX_FILES = 4,
+ LAYOUT_SCSI = 5,
LAYOUT_TYPE_MAX
};
@@ -555,6 +556,7 @@ enum pnfs_block_volume_type {
PNFS_BLOCK_VOLUME_SLICE = 1,
PNFS_BLOCK_VOLUME_CONCAT = 2,
PNFS_BLOCK_VOLUME_STRIPE = 3,
+ PNFS_BLOCK_VOLUME_SCSI = 4,
};
enum pnfs_block_extent_state {
@@ -568,6 +570,23 @@ enum pnfs_block_extent_state {
#define PNFS_BLOCK_EXTENT_SIZE \
(7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE)
+/* on-the-wire size of a SCSI commit range */
+#define PNFS_SCSI_RANGE_SIZE \
+ (4 * sizeof(__be32))
+
+enum scsi_code_set {
+ PS_CODE_SET_BINARY = 1,
+ PS_CODE_SET_ASCII = 2,
+ PS_CODE_SET_UTF8 = 3
+};
+
+enum scsi_designator_type {
+ PS_DESIGNATOR_T10 = 1,
+ PS_DESIGNATOR_EUI64 = 2,
+ PS_DESIGNATOR_NAA = 3,
+ PS_DESIGNATOR_NAME = 8
+};
+
#define NFL4_UFLG_MASK 0x0000003F
#define NFL4_UFLG_DENSE 0x00000001
#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 7ec5b86735f3..4630eeae18e0 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -65,7 +65,6 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
-int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index d14a4c362465..4149868de4e6 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -47,6 +47,8 @@
* runtime initialization.
*/
+struct notifier_block;
+
typedef int (*notifier_fn_t)(struct notifier_block *nb,
unsigned long action, void *data);
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 35fa08fd7739..ac0d65bef5d0 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -8,6 +8,7 @@ struct mnt_namespace;
struct uts_namespace;
struct ipc_namespace;
struct pid_namespace;
+struct cgroup_namespace;
struct fs_struct;
/*
@@ -33,6 +34,7 @@ struct nsproxy {
struct mnt_namespace *mnt_ns;
struct pid_namespace *pid_ns_for_children;
struct net *net_ns;
+ struct cgroup_namespace *cgroup_ns;
};
extern struct nsproxy init_nsproxy;
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index f798e2afba88..6f47562d477b 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -284,7 +284,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
/* ops->db_read_mask && */
ops->db_set_mask &&
ops->db_clear_mask &&
- ops->peer_db_addr &&
+ /* ops->peer_db_addr && */
/* ops->peer_db_read && */
ops->peer_db_set &&
/* ops->peer_db_clear && */
@@ -295,7 +295,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
ops->spad_count &&
ops->spad_read &&
ops->spad_write &&
- ops->peer_spad_addr &&
+ /* ops->peer_spad_addr && */
/* ops->peer_spad_read && */
ops->peer_spad_write &&
1;
@@ -757,6 +757,9 @@ static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
phys_addr_t *db_addr,
resource_size_t *db_size)
{
+ if (!ntb->ops->peer_db_addr)
+ return -EINVAL;
+
return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
}
@@ -948,6 +951,9 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
phys_addr_t *spad_addr)
{
+ if (!ntb->ops->peer_spad_addr)
+ return -EINVAL;
+
return ntb->ops->peer_spad_addr(ntb, idx, spad_addr);
}
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 0b68caff1b3c..a4fcc90b0f20 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -23,6 +23,10 @@ struct nvmem_config {
const struct nvmem_cell_info *cells;
int ncells;
bool read_only;
+ bool root_only;
+ /* Only to be used by old drivers/misc/eeprom drivers */
+ bool compat;
+ struct device *base_dev;
};
#if IS_ENABLED(CONFIG_NVMEM)
@@ -43,5 +47,4 @@ static inline int nvmem_unregister(struct nvmem_device *nvmem)
}
#endif /* CONFIG_NVMEM */
-
#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index dc6e39696b64..7fcb681baadf 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -296,13 +296,13 @@ extern int of_property_read_u64_array(const struct device_node *np,
u64 *out_values,
size_t sz);
-extern int of_property_read_string(struct device_node *np,
+extern int of_property_read_string(const struct device_node *np,
const char *propname,
const char **out_string);
-extern int of_property_match_string(struct device_node *np,
+extern int of_property_match_string(const struct device_node *np,
const char *propname,
const char *string);
-extern int of_property_read_string_helper(struct device_node *np,
+extern int of_property_read_string_helper(const struct device_node *np,
const char *propname,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
@@ -538,14 +538,14 @@ static inline int of_property_read_u64_array(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_property_read_string(struct device_node *np,
+static inline int of_property_read_string(const struct device_node *np,
const char *propname,
const char **out_string)
{
return -ENOSYS;
}
-static inline int of_property_read_string_helper(struct device_node *np,
+static inline int of_property_read_string_helper(const struct device_node *np,
const char *propname,
const char **out_strs, size_t sz, int index)
{
@@ -571,7 +571,7 @@ static inline int of_property_read_u64(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_property_match_string(struct device_node *np,
+static inline int of_property_match_string(const struct device_node *np,
const char *propname,
const char *string)
{
@@ -773,7 +773,7 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
*
* If @out_strs is NULL, the number of strings in the property is returned.
*/
-static inline int of_property_read_string_array(struct device_node *np,
+static inline int of_property_read_string_array(const struct device_node *np,
const char *propname, const char **out_strs,
size_t sz)
{
@@ -792,7 +792,7 @@ static inline int of_property_read_string_array(struct device_node *np,
* does not have a value, and -EILSEQ if the string is not null-terminated
* within the length of the property data.
*/
-static inline int of_property_count_strings(struct device_node *np,
+static inline int of_property_count_strings(const struct device_node *np,
const char *propname)
{
return of_property_read_string_helper(np, propname, NULL, 0, 0);
@@ -816,7 +816,7 @@ static inline int of_property_count_strings(struct device_node *np,
*
* The out_string pointer is modified only if a valid string can be decoded.
*/
-static inline int of_property_read_string_index(struct device_node *np,
+static inline int of_property_read_string_index(const struct device_node *np,
const char *propname,
int index, const char **output)
{
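A typical (hedged) consumer of these const-qualified accessors; the property name and foo_parse_dt() are illustrative.

static int foo_parse_dt(const struct device_node *np)
{
	const char *label;
	int ret;

	ret = of_property_read_string(np, "label", &label);
	if (ret)
		return ret;		/* -EINVAL, -ENODATA or -EILSEQ */

	pr_info("%s: label is %s\n", np->name, label);
	return 0;
}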
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index df9ef3801812..2fbe8682a66f 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -88,7 +88,7 @@ extern void unflatten_device_tree(void);
extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
extern void early_get_first_memblock_info(void *, phys_addr_t *);
-extern u64 fdt_translate_address(const void *blob, int node_offset);
+extern u64 of_flat_dt_translate_address(unsigned long node);
extern void of_fdt_limit_memory(int limit);
#else /* CONFIG_OF_FLATTREE */
static inline void early_init_fdt_scan_reserved_mem(void) {}
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 7dee00143afd..d833eb4dd446 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -51,6 +51,9 @@ struct gpmc_timings {
u32 adv_on; /* Assertion time */
u32 adv_rd_off; /* Read deassertion time */
u32 adv_wr_off; /* Write deassertion time */
+ u32 adv_aad_mux_on; /* ADV assertion time for AAD */
+ u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */
+ u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */
/* WE signals timings corresponding to GPMC_CONFIG4 */
u32 we_on; /* WE assertion time */
@@ -59,6 +62,8 @@ struct gpmc_timings {
/* OE signals timings corresponding to GPMC_CONFIG4 */
u32 oe_on; /* OE assertion time */
u32 oe_off; /* OE deassertion time */
+ u32 oe_aad_mux_on; /* OE assertion time for AAD */
+ u32 oe_aad_mux_off; /* OE deassertion time for AAD */
/* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
u32 page_burst_access; /* Multiple access word delay */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 03e6257321f0..628a43242a34 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -76,8 +76,6 @@ extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
-extern int oom_kills_count(void);
-extern void note_oom_kill(void);
extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
unsigned int points, unsigned long totalpages,
struct mem_cgroup *memcg, const char *message);
@@ -91,7 +89,7 @@ extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
extern bool out_of_memory(struct oom_control *oc);
-extern void exit_oom_victim(void);
+extern void exit_oom_victim(struct task_struct *tsk);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index da523661500a..77b078c103b2 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -17,6 +17,8 @@
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
+#elif MAX_NR_ZONES <= 8
+#define ZONES_SHIFT 3
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 19724e6ebd26..f4ed4f1b0c77 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -144,12 +144,12 @@ static inline struct page *compound_head(struct page *page)
return page;
}
-static inline int PageTail(struct page *page)
+static __always_inline int PageTail(struct page *page)
{
return READ_ONCE(page->compound_head) & 1;
}
-static inline int PageCompound(struct page *page)
+static __always_inline int PageCompound(struct page *page)
{
return test_bit(PG_head, &page->flags) || PageTail(page);
}
@@ -184,31 +184,31 @@ static inline int PageCompound(struct page *page)
* Macros to create function definitions for page flags
*/
#define TESTPAGEFLAG(uname, lname, policy) \
-static inline int Page##uname(struct page *page) \
+static __always_inline int Page##uname(struct page *page) \
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
#define SETPAGEFLAG(uname, lname, policy) \
-static inline void SetPage##uname(struct page *page) \
+static __always_inline void SetPage##uname(struct page *page) \
{ set_bit(PG_##lname, &policy(page, 1)->flags); }
#define CLEARPAGEFLAG(uname, lname, policy) \
-static inline void ClearPage##uname(struct page *page) \
+static __always_inline void ClearPage##uname(struct page *page) \
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define __SETPAGEFLAG(uname, lname, policy) \
-static inline void __SetPage##uname(struct page *page) \
+static __always_inline void __SetPage##uname(struct page *page) \
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
-static inline void __ClearPage##uname(struct page *page) \
+static __always_inline void __ClearPage##uname(struct page *page) \
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTSETFLAG(uname, lname, policy) \
-static inline int TestSetPage##uname(struct page *page) \
+static __always_inline int TestSetPage##uname(struct page *page) \
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTCLEARFLAG(uname, lname, policy) \
-static inline int TestClearPage##uname(struct page *page) \
+static __always_inline int TestClearPage##uname(struct page *page) \
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define PAGEFLAG(uname, lname, policy) \
@@ -371,7 +371,7 @@ PAGEFLAG(Idle, idle, PF_ANY)
#define PAGE_MAPPING_KSM 2
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
-static inline int PageAnon(struct page *page)
+static __always_inline int PageAnon(struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -384,7 +384,7 @@ static inline int PageAnon(struct page *page)
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
-static inline int PageKsm(struct page *page)
+static __always_inline int PageKsm(struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
@@ -415,14 +415,14 @@ static inline int PageUptodate(struct page *page)
return ret;
}
-static inline void __SetPageUptodate(struct page *page)
+static __always_inline void __SetPageUptodate(struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
smp_wmb();
__set_bit(PG_uptodate, &page->flags);
}
-static inline void SetPageUptodate(struct page *page)
+static __always_inline void SetPageUptodate(struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
/*
@@ -456,12 +456,12 @@ static inline void set_page_writeback_keepwrite(struct page *page)
__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
-static inline void set_compound_head(struct page *page, struct page *head)
+static __always_inline void set_compound_head(struct page *page, struct page *head)
{
WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}
-static inline void clear_compound_head(struct page *page)
+static __always_inline void clear_compound_head(struct page *page)
{
WRITE_ONCE(page->compound_head, 0);
}
@@ -593,6 +593,8 @@ static inline void __ClearPageBuddy(struct page *page)
atomic_set(&page->_mapcount, -1);
}
+extern bool is_free_buddy_page(struct page *page);
+
#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
static inline int PageBalloon(struct page *page)
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 17f118a82854..e1fe7cf5bddf 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -45,6 +45,7 @@ struct page_ext {
unsigned int order;
gfp_t gfp_mask;
unsigned int nr_entries;
+ int last_migrate_reason;
unsigned long trace_entries[8];
#endif
};
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index cacaabea8a09..46f1b939948c 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -1,38 +1,54 @@
#ifndef __LINUX_PAGE_OWNER_H
#define __LINUX_PAGE_OWNER_H
+#include <linux/jump_label.h>
+
#ifdef CONFIG_PAGE_OWNER
-extern bool page_owner_inited;
+extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
extern void __reset_page_owner(struct page *page, unsigned int order);
extern void __set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask);
extern gfp_t __get_page_owner_gfp(struct page *page);
+extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+extern void __set_page_owner_migrate_reason(struct page *page, int reason);
+extern void __dump_page_owner(struct page *page);
static inline void reset_page_owner(struct page *page, unsigned int order)
{
- if (likely(!page_owner_inited))
- return;
-
- __reset_page_owner(page, order);
+ if (static_branch_unlikely(&page_owner_inited))
+ __reset_page_owner(page, order);
}
static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
- if (likely(!page_owner_inited))
- return;
-
- __set_page_owner(page, order, gfp_mask);
+ if (static_branch_unlikely(&page_owner_inited))
+ __set_page_owner(page, order, gfp_mask);
}
static inline gfp_t get_page_owner_gfp(struct page *page)
{
- if (likely(!page_owner_inited))
+ if (static_branch_unlikely(&page_owner_inited))
+ return __get_page_owner_gfp(page);
+ else
return 0;
-
- return __get_page_owner_gfp(page);
+}
+static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __copy_page_owner(oldpage, newpage);
+}
+static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __set_page_owner_migrate_reason(page, reason);
+}
+static inline void dump_page_owner(struct page *page)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __dump_page_owner(page);
}
#else
static inline void reset_page_owner(struct page *page, unsigned int order)
@@ -46,6 +62,14 @@ static inline gfp_t get_page_owner_gfp(struct page *page)
{
return 0;
}
-
+static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+}
+static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+{
+}
+static inline void dump_page_owner(struct page *page)
+{
+}
#endif /* CONFIG_PAGE_OWNER */
#endif /* __LINUX_PAGE_OWNER_H */
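The wrappers above all follow the same static-branch idiom; a standalone hedged sketch of that pattern, with hypothetical foo_* names.

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(foo_debug_inited);

static inline void foo_debug_track(struct page *page, unsigned int order)
{
	/* A patched NOP until some init path calls static_branch_enable(),
	 * keeping the fast path free of the tracking overhead.
	 */
	if (static_branch_unlikely(&foo_debug_inited))
		__foo_debug_track(page, order);	/* hypothetical out-of-line path */
}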
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
new file mode 100644
index 000000000000..e596d5d9540e
--- /dev/null
+++ b/include/linux/page_ref.h
@@ -0,0 +1,173 @@
+#ifndef _LINUX_PAGE_REF_H
+#define _LINUX_PAGE_REF_H
+
+#include <linux/atomic.h>
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
+#include <linux/tracepoint-defs.h>
+
+extern struct tracepoint __tracepoint_page_ref_set;
+extern struct tracepoint __tracepoint_page_ref_mod;
+extern struct tracepoint __tracepoint_page_ref_mod_and_test;
+extern struct tracepoint __tracepoint_page_ref_mod_and_return;
+extern struct tracepoint __tracepoint_page_ref_mod_unless;
+extern struct tracepoint __tracepoint_page_ref_freeze;
+extern struct tracepoint __tracepoint_page_ref_unfreeze;
+
+#ifdef CONFIG_DEBUG_PAGE_REF
+
+/*
+ * Ideally we would want to use the trace_<tracepoint>_enabled() helper
+ * functions. But due to include header file issues, that is not
+ * feasible. Instead we have to open code the static key functions.
+ *
+ * See trace_##name##_enabled(void) in include/linux/tracepoint.h
+ */
+#define page_ref_tracepoint_active(t) static_key_false(&(t).key)
+
+extern void __page_ref_set(struct page *page, int v);
+extern void __page_ref_mod(struct page *page, int v);
+extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
+extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
+extern void __page_ref_mod_unless(struct page *page, int v, int u);
+extern void __page_ref_freeze(struct page *page, int v, int ret);
+extern void __page_ref_unfreeze(struct page *page, int v);
+
+#else
+
+#define page_ref_tracepoint_active(t) false
+
+static inline void __page_ref_set(struct page *page, int v)
+{
+}
+static inline void __page_ref_mod(struct page *page, int v)
+{
+}
+static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
+{
+}
+static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
+{
+}
+static inline void __page_ref_mod_unless(struct page *page, int v, int u)
+{
+}
+static inline void __page_ref_freeze(struct page *page, int v, int ret)
+{
+}
+static inline void __page_ref_unfreeze(struct page *page, int v)
+{
+}
+
+#endif
+
+static inline int page_ref_count(struct page *page)
+{
+ return atomic_read(&page->_count);
+}
+
+static inline int page_count(struct page *page)
+{
+ return atomic_read(&compound_head(page)->_count);
+}
+
+static inline void set_page_count(struct page *page, int v)
+{
+ atomic_set(&page->_count, v);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
+ __page_ref_set(page, v);
+}
+
+/*
+ * Setup the page count before being freed into the page allocator for
+ * the first time (boot or memory hotplug)
+ */
+static inline void init_page_count(struct page *page)
+{
+ set_page_count(page, 1);
+}
+
+static inline void page_ref_add(struct page *page, int nr)
+{
+ atomic_add(nr, &page->_count);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ __page_ref_mod(page, nr);
+}
+
+static inline void page_ref_sub(struct page *page, int nr)
+{
+ atomic_sub(nr, &page->_count);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ __page_ref_mod(page, -nr);
+}
+
+static inline void page_ref_inc(struct page *page)
+{
+ atomic_inc(&page->_count);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ __page_ref_mod(page, 1);
+}
+
+static inline void page_ref_dec(struct page *page)
+{
+ atomic_dec(&page->_count);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ __page_ref_mod(page, -1);
+}
+
+static inline int page_ref_sub_and_test(struct page *page, int nr)
+{
+ int ret = atomic_sub_and_test(nr, &page->_count);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+ __page_ref_mod_and_test(page, -nr, ret);
+ return ret;
+}
+
+static inline int page_ref_dec_and_test(struct page *page)
+{
+ int ret = atomic_dec_and_test(&page->_count);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+ __page_ref_mod_and_test(page, -1, ret);
+ return ret;
+}
+
+static inline int page_ref_dec_return(struct page *page)
+{
+ int ret = atomic_dec_return(&page->_count);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ __page_ref_mod_and_return(page, -1, ret);
+ return ret;
+}
+
+static inline int page_ref_add_unless(struct page *page, int nr, int u)
+{
+ int ret = atomic_add_unless(&page->_count, nr, u);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
+ __page_ref_mod_unless(page, nr, ret);
+ return ret;
+}
+
+static inline int page_ref_freeze(struct page *page, int count)
+{
+ int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
+ __page_ref_freeze(page, count, ret);
+ return ret;
+}
+
+static inline void page_ref_unfreeze(struct page *page, int count)
+{
+ VM_BUG_ON_PAGE(page_count(page) != 0, page);
+ VM_BUG_ON(count == 0);
+
+ atomic_set(&page->_count, count);
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
+ __page_ref_unfreeze(page, count);
+}
+
+#endif
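A hedged sketch of the conversion this header enables: open-coded atomics on page->_count become page_ref_*() calls so the optional tracepoints fire (the foo_* wrappers are illustrative).

static inline void foo_get_page(struct page *page)
{
	page_ref_inc(page);		/* was: atomic_inc(&page->_count) */
}

static inline void foo_put_page(struct page *page)
{
	/* was: if (atomic_dec_and_test(&page->_count)) ... */
	if (page_ref_dec_and_test(page))
		__free_pages(page, 0);	/* illustrative release path */
}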
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 92395a0a7dc5..1ebd65c91422 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -165,7 +165,7 @@ static inline int page_cache_get_speculative(struct page *page)
* SMP requires.
*/
VM_BUG_ON_PAGE(page_count(page) == 0, page);
- atomic_inc(&page->_count);
+ page_ref_inc(page);
#else
if (unlikely(!get_page_unless_zero(page))) {
@@ -194,10 +194,10 @@ static inline int page_cache_add_speculative(struct page *page, int count)
VM_BUG_ON(!in_atomic());
# endif
VM_BUG_ON_PAGE(page_count(page) == 0, page);
- atomic_add(count, &page->_count);
+ page_ref_add(page, count);
#else
- if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
+ if (unlikely(!page_ref_add_unless(page, count, 0)))
return 0;
#endif
VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
@@ -205,19 +205,6 @@ static inline int page_cache_add_speculative(struct page *page, int count)
return 1;
}
-static inline int page_freeze_refs(struct page *page, int count)
-{
- return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
-}
-
-static inline void page_unfreeze_refs(struct page *page, int count)
-{
- VM_BUG_ON_PAGE(page_count(page) != 0, page);
- VM_BUG_ON(count == 0);
-
- atomic_set(&page->_count, count);
-}
-
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
@@ -663,8 +650,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow,
- struct mem_cgroup *memcg);
+extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
/*
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
new file mode 100644
index 000000000000..39726caef5b1
--- /dev/null
+++ b/include/linux/pci-dma-compat.h
@@ -0,0 +1,147 @@
+/* include this file if the platform implements the dma_ DMA Mapping API
+ * and wants to provide the pci_ DMA Mapping API in terms of it */
+
+#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
+#define _ASM_GENERIC_PCI_DMA_COMPAT_H
+
+#include <linux/dma-mapping.h>
+
+/* This defines the direction arg to the DMA mapping routines. */
+#define PCI_DMA_BIDIRECTIONAL 0
+#define PCI_DMA_TODEVICE 1
+#define PCI_DMA_FROMDEVICE 2
+#define PCI_DMA_NONE 3
+
+static inline void *
+pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
+}
+
+static inline void *
+pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
+ size, dma_handle, GFP_ATOMIC);
+}
+
+static inline void
+pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
+}
+
+static inline dma_addr_t
+pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+{
+ return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
+}
+
+static inline dma_addr_t
+pci_map_page(struct pci_dev *hwdev, struct page *page,
+ unsigned long offset, size_t size, int direction)
+{
+ return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
+ size_t size, int direction)
+{
+ dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
+}
+
+static inline int
+pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+}
+
+static inline int
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
+{
+ return dma_mapping_error(&pdev->dev, dma_addr);
+}
+
+#ifdef CONFIG_PCI
+static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+{
+ return dma_set_mask(&dev->dev, mask);
+}
+
+static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+ return dma_set_coherent_mask(&dev->dev, mask);
+}
+
+static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
+ unsigned int size)
+{
+ return dma_set_max_seg_size(&dev->dev, size);
+}
+
+static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
+ unsigned long mask)
+{
+ return dma_set_seg_boundary(&dev->dev, mask);
+}
+#else
+static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+{ return -EIO; }
+static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{ return -EIO; }
+static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
+ unsigned int size)
+{ return -EIO; }
+static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
+ unsigned long mask)
+{ return -EIO; }
+#endif
+
+#endif
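
Since every helper in this compat header just forwards to the generic DMA API on &pdev->dev, a legacy driver keeps working unchanged. A hedged fragment showing the equivalence (example_map_buffer is hypothetical):

#include <linux/pci.h>

static int example_map_buffer(struct pci_dev *pdev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	/* equivalent to dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE) */
	if (pci_dma_mapping_error(pdev, *handle))
		return -ENOMEM;
	return 0;
}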
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 27df4a6585da..004b8133417d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -70,12 +70,6 @@ enum pci_mmap_state {
pci_mmap_mem
};
-/* This defines the direction arg to the DMA mapping routines. */
-#define PCI_DMA_BIDIRECTIONAL 0
-#define PCI_DMA_TODEVICE 1
-#define PCI_DMA_FROMDEVICE 2
-#define PCI_DMA_NONE 3
-
/*
* For PCI devices, the region numbers are assigned this way:
*/
@@ -359,6 +353,7 @@ struct pci_dev {
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
unsigned int irq_managed:1;
unsigned int has_secondary_link:1;
+ unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -578,6 +573,8 @@ static inline int pcibios_err_to_errno(int err)
/* Low-level architecture-dependent routines */
struct pci_ops {
+ int (*add_bus)(struct pci_bus *bus);
+ void (*remove_bus)(struct pci_bus *bus);
void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
@@ -746,9 +743,26 @@ struct pci_driver {
.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
+enum {
+ PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */
+ PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */
+ PCI_PROBE_ONLY = 0x00000004, /* use existing setup */
+ PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */
+ PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */
+ PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
+ PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */
+};
+
/* these external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
+extern unsigned int pci_flags;
+
+static inline void pci_set_flags(int flags) { pci_flags = flags; }
+static inline void pci_add_flags(int flags) { pci_flags |= flags; }
+static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
+static inline int pci_has_flag(int flag) { return pci_flags & flag; }
+
void pcie_bus_configure_settings(struct pci_bus *bus);
enum pcie_bus_config_types {
@@ -770,6 +784,7 @@ extern struct list_head pci_root_buses; /* list of all known PCI buses */
int no_pci_devices(void);
void pcibios_resource_survey_bus(struct pci_bus *bus);
+void pcibios_bus_add_device(struct pci_dev *pdev);
void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
@@ -988,23 +1003,6 @@ static inline int pci_is_managed(struct pci_dev *pdev)
return pdev->is_managed;
}
-static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
-{
- pdev->irq = irq;
- pdev->irq_managed = 1;
-}
-
-static inline void pci_reset_managed_irq(struct pci_dev *pdev)
-{
- pdev->irq = 0;
- pdev->irq_managed = 0;
-}
-
-static inline bool pci_has_managed_irq(struct pci_dev *pdev)
-{
- return pdev->irq_managed && pdev->irq > 0;
-}
-
void pci_disable_device(struct pci_dev *dev);
extern unsigned int pcibios_max_latency;
@@ -1021,8 +1019,6 @@ void pci_intx(struct pci_dev *dev, int enable);
bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
-int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
-int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
int pci_wait_for_pending_transaction(struct pci_dev *dev);
int pcix_get_max_mmrbc(struct pci_dev *dev);
@@ -1238,6 +1234,7 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
+
/* kmem_cache style wrapper around pci_alloc_consistent() */
#include <linux/pci-dma.h>
@@ -1405,6 +1402,11 @@ void pci_register_set_vga_state(arch_set_vga_state_t func);
#else /* CONFIG_PCI is not enabled */
+static inline void pci_set_flags(int flags) { }
+static inline void pci_add_flags(int flags) { }
+static inline void pci_clear_flags(int flags) { }
+static inline int pci_has_flag(int flag) { return 0; }
+
/*
* If the system does not have PCI, clearly these return errors. Define
* these as simple inline functions to avoid hair in drivers.
@@ -1444,16 +1446,6 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
static inline void pci_set_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
- unsigned int size)
-{ return -EIO; }
-static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
- unsigned long mask)
-{ return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
static inline int __pci_register_driver(struct pci_driver *drv,
@@ -1515,6 +1507,10 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#include <asm/pci.h>
+#ifndef pci_root_bus_fwnode
+#define pci_root_bus_fwnode(bus) NULL
+#endif
+
/* these helpers provide future and backwards compatibility
* for accessing popular PCI BAR info */
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
@@ -1738,6 +1734,8 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
+int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
+void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
@@ -1754,6 +1752,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
+static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
+{
+ return -ENOSYS;
+}
+static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
+ int id, int reset) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
@@ -1834,12 +1838,13 @@ bool pci_acs_path_enabled(struct pci_dev *start,
#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
/* Small Resource Data Type Tag Item Names */
-#define PCI_VPD_STIN_END 0x78 /* End */
+#define PCI_VPD_STIN_END 0x0f /* End */
-#define PCI_VPD_SRDT_END PCI_VPD_STIN_END
+#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
#define PCI_VPD_SRDT_TIN_MASK 0x78
#define PCI_VPD_SRDT_LEN_MASK 0x07
+#define PCI_VPD_LRDT_TIN_MASK 0x7f
#define PCI_VPD_LRDT_TAG_SIZE 3
#define PCI_VPD_SRDT_TAG_SIZE 1
@@ -1863,6 +1868,17 @@ static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
}
/**
+ * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
+ * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
+ *
+ * Returns the extracted Large Resource Data Type Tag item.
+ */
+static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
+{
+ return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
+}
+
+/**
* pci_vpd_srdt_size - Extracts the Small Resource Data Type length
* @lrdt: Pointer to the beginning of the Small Resource Data Type tag
*
@@ -1874,6 +1890,17 @@ static inline u8 pci_vpd_srdt_size(const u8 *srdt)
}
/**
+ * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
+ * @srdt: Pointer to the beginning of the Small Resource Data Type tag
+ *
+ * Returns the extracted Small Resource Data Type Tag Item.
+ */
+static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
+{
+ return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
+}
+
+/**
* pci_vpd_info_field_size - Extracts the information field length
* @lrdt: Pointer to the beginning of an information field header
*
@@ -1989,4 +2016,8 @@ static inline bool pci_ari_enabled(struct pci_bus *bus)
{
return bus->self && bus->self->ari_enabled;
}
+
+/* provide the legacy pci_dma_* API */
+#include <linux/pci-dma-compat.h>
+
#endif /* LINUX_PCI_H */
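
With PCI_VPD_STIN_END corrected to the raw tag value 0x0f and the new pci_vpd_lrdt_tag()/pci_vpd_srdt_tag() accessors, a VPD walker can size and classify a resource header directly. A sketch under the assumption that buf points at a header obtained via pci_read_vpd(); the helper name is made up:

static int example_vpd_resource_len(const u8 *buf)
{
	if (buf[0] & PCI_VPD_LRDT)
		/* large resource: 1-byte tag followed by a 2-byte length;
		 * pci_vpd_lrdt_tag(buf) yields the tag item for dispatch
		 */
		return PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(buf);

	/* small resource: tag and length share a single byte */
	if (pci_vpd_srdt_tag(buf) == PCI_VPD_STIN_END)
		return 0;	/* end of VPD, nothing follows */

	return PCI_VPD_SRDT_TAG_SIZE + pci_vpd_srdt_size(buf);
}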
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 37f05cb1dfd6..247da8c95860 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -110,6 +110,7 @@
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
+#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
@@ -2506,6 +2507,10 @@
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
+#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
+#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
+#define PCI_SUBDEVICE_ID_QEMU 0x1100
+
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 83b5e34c6580..4196c90a3c88 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -104,9 +104,11 @@ struct arm_pmu {
atomic_t active_events;
struct mutex reserve_mutex;
u64 max_period;
+ bool secure_access; /* 32-bit ARM only */
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
struct notifier_block hotplug_nb;
+ struct notifier_block cpu_pm_nb;
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b35a61a481fa..f291275ffd71 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -121,6 +121,7 @@ struct hw_perf_event {
struct { /* intel_cqm */
int cqm_state;
u32 cqm_rmid;
+ int is_group_event;
struct list_head cqm_events_entry;
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
@@ -128,6 +129,10 @@ struct hw_perf_event {
struct { /* itrace */
int itrace_started;
};
+ struct { /* amd_power */
+ u64 pwr_acc;
+ u64 ptsc;
+ };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
/*
@@ -397,6 +402,7 @@ struct pmu {
* enum perf_event_active_state - the states of a event
*/
enum perf_event_active_state {
+ PERF_EVENT_STATE_DEAD = -4,
PERF_EVENT_STATE_EXIT = -3,
PERF_EVENT_STATE_ERROR = -2,
PERF_EVENT_STATE_OFF = -1,
@@ -467,6 +473,7 @@ struct perf_event {
int group_flags;
struct perf_event *group_leader;
struct pmu *pmu;
+ void *pmu_private;
enum perf_event_active_state state;
unsigned int attach_state;
@@ -905,7 +912,7 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
}
}
-extern struct static_key_deferred perf_sched_events;
+extern struct static_key_false perf_sched_events;
static __always_inline bool
perf_sw_migrate_enabled(void)
@@ -924,7 +931,7 @@ static inline void perf_event_task_migrate(struct task_struct *task)
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
- if (static_key_false(&perf_sched_events.key))
+ if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_in(prev, task);
if (perf_sw_migrate_enabled() && task->sched_migrated) {
@@ -941,7 +948,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
{
perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
- if (static_key_false(&perf_sched_events.key))
+ if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_out(prev, next);
}
@@ -964,11 +971,20 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern struct perf_callchain_entry *
+get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
+ bool crosstask, bool add_mark);
+extern int get_callchain_buffers(void);
+extern void put_callchain_buffers(void);
-static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
- if (entry->nr < PERF_MAX_STACK_DEPTH)
+ if (entry->nr < PERF_MAX_STACK_DEPTH) {
entry->ip[entry->nr++] = ip;
+ return 0;
+ } else {
+ return -1; /* no more room, stop walking the stack */
+ }
}
extern int sysctl_perf_event_paranoid;
@@ -1108,12 +1124,6 @@ static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
-extern bool perf_event_can_stop_tick(void);
-#else
-static inline bool perf_event_can_stop_tick(void) { return true; }
-#endif
-
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
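
perf_callchain_store() now reports when the entry buffer is full, so an architecture unwinder can bail out early rather than walking to PERF_MAX_STACK_DEPTH regardless. A hedged sketch (the frame array and function are illustrative, not any architecture's real unwinder):

static void example_store_frames(struct perf_callchain_entry *entry,
				 const unsigned long *frames, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (perf_callchain_store(entry, frames[i]))
			break;	/* buffer full, stop walking the stack */
}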
diff --git a/include/linux/phy.h b/include/linux/phy.h
index d6f3641e7933..2abd7918f64f 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -327,8 +327,6 @@ struct phy_c45_device_ids {
/* phy_device: An instance of a PHY
*
* drv: Pointer to the driver for this PHY instance
- * bus: Pointer to the bus this PHY is on
- * dev: driver model device structure for this PHY
* phy_id: UID for this device found during discovery
 * c45_ids: 802.3-c45 Device Identifiers if is_c45.
* is_c45: Set to true if this phy uses clause 45 addressing.
@@ -338,7 +336,6 @@ struct phy_c45_device_ids {
* suspended: Set to true if this phy has been suspended successfully.
* state: state of the PHY for management purposes
* dev_flags: Device-specific flags used by the PHY driver.
- * addr: Bus address of PHY
* link_timeout: The number of timer firings to wait before the
* giving up on the current attempt at acquiring a link
* irq: IRQ number of the PHY's interrupt (-1 if none)
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 2400d2ea4f34..1d41ec44e39d 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -19,7 +19,7 @@ extern struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
int link_gpio,
struct device_node *np);
-extern void fixed_phy_del(int phy_addr);
+extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
@@ -40,9 +40,8 @@ static inline struct phy_device *fixed_phy_register(unsigned int irq,
{
return ERR_PTR(-ENODEV);
}
-static inline int fixed_phy_del(int phy_addr)
+static inline void fixed_phy_unregister(struct phy_device *phydev)
{
- return -ENODEV;
}
static inline int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
new file mode 100644
index 000000000000..1d405a2b7272
--- /dev/null
+++ b/include/linux/pkeys.h
@@ -0,0 +1,33 @@
+#ifndef _LINUX_PKEYS_H
+#define _LINUX_PKEYS_H
+
+#include <linux/mm_types.h>
+#include <asm/mmu_context.h>
+
+#define PKEY_DISABLE_ACCESS 0x1
+#define PKEY_DISABLE_WRITE 0x2
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE)
+
+#ifdef CONFIG_ARCH_HAS_PKEYS
+#include <asm/pkeys.h>
+#else /* ! CONFIG_ARCH_HAS_PKEYS */
+#define arch_max_pkey() (1)
+#define execute_only_pkey(mm) (0)
+#define arch_override_mprotect_pkey(vma, prot, pkey) (0)
+#define PKEY_DEDICATED_EXECUTE_ONLY 0
+#endif /* ! CONFIG_ARCH_HAS_PKEYS */
+
+/*
+ * This is called from mprotect_pkey().
+ *
+ * Returns true if the protection key is valid.
+ */
+static inline bool validate_pkey(int pkey)
+{
+ if (pkey < 0)
+ return false;
+ return (pkey < arch_max_pkey());
+}
+
+#endif /* _LINUX_PKEYS_H */
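
validate_pkey() only bounds-checks the key against arch_max_pkey(); the caller decides what an invalid key means. A minimal, hypothetical check as it might appear in a pkey-aware mprotect path:

#include <linux/pkeys.h>

static int example_check_pkey(int pkey)
{
	/* rejects negative keys and keys >= arch_max_pkey() */
	if (!validate_pkey(pkey))
		return -EINVAL;
	return 0;
}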
diff --git a/include/linux/platform_data/ad5761.h b/include/linux/platform_data/ad5761.h
new file mode 100644
index 000000000000..7bd8ed7d978e
--- /dev/null
+++ b/include/linux/platform_data/ad5761.h
@@ -0,0 +1,44 @@
+/*
+ * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter
+ *
+ * Copyright 2016 Qtechnology A/S
+ * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef __LINUX_PLATFORM_DATA_AD5761_H__
+#define __LINUX_PLATFORM_DATA_AD5761_H__
+
+/**
+ * enum ad5761_voltage_range - Voltage range the AD5761 is configured for.
+ * @AD5761_VOLTAGE_RANGE_M10V_10V: -10V to 10V
+ * @AD5761_VOLTAGE_RANGE_0V_10V: 0V to 10V
+ * @AD5761_VOLTAGE_RANGE_M5V_5V: -5V to 5V
+ * @AD5761_VOLTAGE_RANGE_0V_5V: 0V to 5V
+ * @AD5761_VOLTAGE_RANGE_M2V5_7V5: -2.5V to 7.5V
+ * @AD5761_VOLTAGE_RANGE_M3V_3V: -3V to 3V
+ * @AD5761_VOLTAGE_RANGE_0V_16V: 0V to 16V
+ * @AD5761_VOLTAGE_RANGE_0V_20V: 0V to 20V
+ */
+
+enum ad5761_voltage_range {
+ AD5761_VOLTAGE_RANGE_M10V_10V,
+ AD5761_VOLTAGE_RANGE_0V_10V,
+ AD5761_VOLTAGE_RANGE_M5V_5V,
+ AD5761_VOLTAGE_RANGE_0V_5V,
+ AD5761_VOLTAGE_RANGE_M2V5_7V5,
+ AD5761_VOLTAGE_RANGE_M3V_3V,
+ AD5761_VOLTAGE_RANGE_0V_16V,
+ AD5761_VOLTAGE_RANGE_0V_20V,
+};
+
+/**
+ * struct ad5761_platform_data - AD5761 DAC driver platform data
+ * @voltage_range: Voltage range the AD5761 is configured for
+ */
+
+struct ad5761_platform_data {
+ enum ad5761_voltage_range voltage_range;
+};
+
+#endif
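
A board file would hand this platform data to the DAC through its SPI board info. A hypothetical fragment (bus number, chip select and clock rate are placeholders):

#include <linux/spi/spi.h>
#include <linux/platform_data/ad5761.h>

static const struct ad5761_platform_data example_ad5761_pdata = {
	.voltage_range = AD5761_VOLTAGE_RANGE_M10V_10V,	/* -10 V .. +10 V */
};

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "ad5761",
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 0,
		.platform_data	= &example_ad5761_pdata,
	},
};
/* registered from board init with spi_register_board_info() */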
diff --git a/include/linux/spi/ad7879.h b/include/linux/platform_data/ad7879.h
index 58368be0b4c0..69e2e1fd2bc8 100644
--- a/include/linux/spi/ad7879.h
+++ b/include/linux/platform_data/ad7879.h
@@ -1,4 +1,4 @@
-/* linux/spi/ad7879.h */
+/* linux/platform_data/ad7879.h */
/* Touchscreen characteristics vary between boards and models. The
* platform_data for the device's "struct device" holds this information.
diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h
index a81766cae230..9db1b905df24 100644
--- a/include/linux/platform_data/adau17x1.h
+++ b/include/linux/platform_data/adau17x1.h
@@ -1,5 +1,5 @@
/*
- * Driver for ADAU1761/ADAU1461/ADAU1761/ADAU1961/ADAU1781/ADAU1781 codecs
+ * Driver for ADAU1361/ADAU1461/ADAU1761/ADAU1961/ADAU1381/ADAU1781 codecs
*
* Copyright 2011-2014 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
index c42aa89d34ee..dc9a13e5acda 100644
--- a/include/linux/platform_data/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -9,7 +9,7 @@
#define _LINUX_AT24_H
#include <linux/types.h>
-#include <linux/memory.h>
+#include <linux/nvmem-consumer.h>
/**
* struct at24_platform_data - data to set up at24 (generic eeprom) driver
@@ -17,7 +17,7 @@
* @page_size: number of byte which can be written in one go
* @flags: tunable options, check AT24_FLAG_* defines
* @setup: an optional callback invoked after eeprom is probed; enables kernel
- code to access eeprom via memory_accessor, see example
+ code to access eeprom via nvmem, see example
* @context: optional parameter passed to setup()
*
* If you set up a custom eeprom type, please double-check the parameters.
@@ -26,13 +26,13 @@
*
* An example in pseudo code for a setup() callback:
*
- * void get_mac_addr(struct memory_accessor *mem_acc, void *context)
+ * void get_mac_addr(struct nvmem_device *nvmem, void *context)
* {
* u8 *mac_addr = ethernet_pdata->mac_addr;
* off_t offset = context;
*
* // Read MAC addr from EEPROM
- * if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN)
+ * if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
* pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
* }
*
@@ -48,7 +48,7 @@ struct at24_platform_data {
#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */
#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */
- void (*setup)(struct memory_accessor *, void *context);
+ void (*setup)(struct nvmem_device *nvmem, void *context);
void *context;
};
diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h
deleted file mode 100644
index e75dcbf2b230..000000000000
--- a/include/linux/platform_data/brcmfmac-sdio.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _LINUX_BRCMFMAC_PLATFORM_H
-#define _LINUX_BRCMFMAC_PLATFORM_H
-
-/*
- * Platform specific driver functions and data. Through the platform specific
- * device data functions can be provided to help the brcmfmac driver to
- * operate with the device in combination with the used platform.
- *
- * Use the platform data in the following (similar) way:
- *
- *
-#include <brcmfmac_platform.h>
-
-
-static void brcmfmac_power_on(void)
-{
-}
-
-static void brcmfmac_power_off(void)
-{
-}
-
-static void brcmfmac_reset(void)
-{
-}
-
-static struct brcmfmac_sdio_platform_data brcmfmac_sdio_pdata = {
- .power_on = brcmfmac_power_on,
- .power_off = brcmfmac_power_off,
- .reset = brcmfmac_reset
-};
-
-static struct platform_device brcmfmac_device = {
- .name = BRCMFMAC_SDIO_PDATA_NAME,
- .id = PLATFORM_DEVID_NONE,
- .dev.platform_data = &brcmfmac_sdio_pdata
-};
-
-void __init brcmfmac_init_pdata(void)
-{
- brcmfmac_sdio_pdata.oob_irq_supported = true;
- brcmfmac_sdio_pdata.oob_irq_nr = gpio_to_irq(GPIO_BRCMF_SDIO_OOB);
- brcmfmac_sdio_pdata.oob_irq_flags = IORESOURCE_IRQ |
- IORESOURCE_IRQ_HIGHLEVEL;
- platform_device_register(&brcmfmac_device);
-}
- *
- *
- * Note: the brcmfmac can be loaded as module or be statically built-in into
- * the kernel. If built-in then do note that it uses module_init (and
- * module_exit) routines which equal device_initcall. So if you intend to
- * create a module with the platform specific data for the brcmfmac and have
- * it built-in to the kernel then use a higher initcall then device_initcall
- * (see init.h). If this is not done then brcmfmac will load without problems
- * but will not pickup the platform data.
- *
- * When the driver does not "detect" platform driver data then it will continue
- * without reporting anything and just assume there is no data needed. Which is
- * probably true for most platforms.
- *
- * Explanation of the platform_data fields:
- *
- * drive_strength: is the preferred drive_strength to be used for the SDIO
- * pins. If 0 then a default value will be used. This is the target drive
- * strength, the exact drive strength which will be used depends on the
- * capabilities of the device.
- *
- * oob_irq_supported: does the board have support for OOB interrupts. SDIO
- * in-band interrupts are relatively slow and for having less overhead on
- * interrupt processing an out of band interrupt can be used. If the HW
- * supports this then enable this by setting this field to true and configure
- * the oob related fields.
- *
- * oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are
- * used for registering the irq using request_irq function.
- *
- * broken_sg_support: flag for broken sg list support of SDIO host controller.
- * Set this to true if the SDIO host controller has higher align requirement
- * than 32 bytes for each scatterlist item.
- *
- * sd_head_align: alignment requirement for start of data buffer
- *
- * sd_sgentry_align: length alignment requirement for each sg entry
- *
- * power_on: This function is called by the brcmfmac when the module gets
- * loaded. This can be particularly useful for low power devices. The platform
- * spcific routine may for example decide to power up the complete device.
- * If there is no use-case for this function then provide NULL.
- *
- * power_off: This function is called by the brcmfmac when the module gets
- * unloaded. At this point the device can be powered down or otherwise be reset.
- * So if an actual power_off is not supported but reset is then reset the device
- * when this function gets called. This can be particularly useful for low power
- * devices. If there is no use-case for this function (either power-down or
- * reset) then provide NULL.
- *
- * reset: This function can get called if the device communication broke down.
- * This functionality is particularly useful in case of SDIO type devices. It is
- * possible to reset a dongle via sdio data interface, but it requires that
- * this is fully functional. This function is chip/module specific and this
- * function should return only after the complete reset has completed.
- */
-
-#define BRCMFMAC_SDIO_PDATA_NAME "brcmfmac_sdio"
-
-struct brcmfmac_sdio_platform_data {
- unsigned int drive_strength;
- bool oob_irq_supported;
- unsigned int oob_irq_nr;
- unsigned long oob_irq_flags;
- bool broken_sg_support;
- unsigned short sd_head_align;
- unsigned short sd_sgentry_align;
- void (*power_on)(void);
- void (*power_off)(void);
- void (*reset)(void);
-};
-
-#endif /* _LINUX_BRCMFMAC_PLATFORM_H */
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
new file mode 100644
index 000000000000..1d30bf278231
--- /dev/null
+++ b/include/linux/platform_data/brcmfmac.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 201 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _LINUX_BRCMFMAC_PLATFORM_H
+#define _LINUX_BRCMFMAC_PLATFORM_H
+
+
+#define BRCMFMAC_PDATA_NAME "brcmfmac"
+
+#define BRCMFMAC_COUNTRY_BUF_SZ 4
+
+
+/*
+ * Platform specific driver functions and data. Through the platform specific
+ * device data, functions and data can be provided to help the brcmfmac driver to
+ * operate with the device in combination with the used platform.
+ */
+
+
+/**
+ * Note: the brcmfmac can be loaded as module or be statically built-in into
+ * the kernel. If built-in then do note that it uses module_init (and
+ * module_exit) routines which equal device_initcall. So if you intend to
+ * create a module with the platform specific data for the brcmfmac and have
+ * it built-in to the kernel then use a higher initcall than device_initcall
+ * (see init.h). If this is not done then brcmfmac will load without problems
+ * but will not pick up the platform data.
+ *
+ * When the driver does not "detect" platform driver data then it will continue
+ * without reporting anything and just assume there is no data needed. Which is
+ * probably true for most platforms.
+ */
+
+/**
+ * enum brcmf_bus_type - Bus type identifier. Currently SDIO, USB and PCIE are
+ * supported.
+ */
+enum brcmf_bus_type {
+ BRCMF_BUSTYPE_SDIO,
+ BRCMF_BUSTYPE_USB,
+ BRCMF_BUSTYPE_PCIE
+};
+
+
+/**
+ * struct brcmfmac_sdio_pd - SDIO Device specific platform data.
+ *
+ * @txglomsz: SDIO txglom size. Use 0 if default of driver is to be
+ * used.
+ * @drive_strength: is the preferred drive_strength to be used for the SDIO
+ * pins. If 0 then a default value will be used. This is
+ * the target drive strength, the exact drive strength
+ * which will be used depends on the capabilities of the
+ * device.
+ * @oob_irq_supported: does the board have support for OOB interrupts. SDIO
+ * in-band interrupts are relatively slow and for having
+ * less overhead on interrupt processing an out of band
+ * interrupt can be used. If the HW supports this then
+ * enable this by setting this field to true and configure
+ * the oob related fields.
+ * @oob_irq_nr,
+ * @oob_irq_flags: the OOB interrupt information. The values are used for
+ * registering the irq using request_irq function.
+ * @broken_sg_support: flag for broken sg list support of SDIO host controller.
+ * Set this to true if the SDIO host controller has higher
+ * align requirement than 32 bytes for each scatterlist
+ * item.
+ * @sd_head_align: alignment requirement for start of data buffer.
+ * @sd_sgentry_align: length alignment requirement for each sg entry.
+ * @reset: This function can get called if the device communication
+ * broke down. This functionality is particularly useful in
+ * case of SDIO type devices. It is possible to reset a
+ * dongle via sdio data interface, but it requires that
+ * this is fully functional. This function is chip/module
+ * specific and this function should return only after the
+ * complete reset has completed.
+ */
+struct brcmfmac_sdio_pd {
+ int txglomsz;
+ unsigned int drive_strength;
+ bool oob_irq_supported;
+ unsigned int oob_irq_nr;
+ unsigned long oob_irq_flags;
+ bool broken_sg_support;
+ unsigned short sd_head_align;
+ unsigned short sd_sgentry_align;
+ void (*reset)(void);
+};
+
+/**
+ * struct brcmfmac_pd_cc_entry - Struct for translating user space country code
+ * (iso3166) to firmware country code and
+ * revision.
+ *
+ * @iso3166: iso3166 alpha 2 country code string.
+ * @cc: firmware country code string.
+ * @rev: firmware country code revision.
+ */
+struct brcmfmac_pd_cc_entry {
+ char iso3166[BRCMFMAC_COUNTRY_BUF_SZ];
+ char cc[BRCMFMAC_COUNTRY_BUF_SZ];
+ s32 rev;
+};
+
+/**
+ * struct brcmfmac_pd_cc - Struct for translating country codes as set by user
+ * space to a country code and rev which can be used by
+ * firmware.
+ *
+ * @table_size: number of entries in table (> 0)
+ * @table: array of 1 or more elements with translation information.
+ */
+struct brcmfmac_pd_cc {
+ int table_size;
+ struct brcmfmac_pd_cc_entry table[0];
+};
+
+/**
+ * struct brcmfmac_pd_device - Device specific platform data. (id/rev/bus_type)
+ * is the unique identifier of the device.
+ *
+ * @id: ID of the device for which this data is. In case of SDIO
+ * or PCIE this is the chipid as identified by chip.c. In
+ * case of USB this is the chipid as identified by the
+ * device query.
+ * @rev: chip revision, see id.
+ * @bus_type: The type of bus. Some chipid/rev exist for different bus
+ * types. Each bus type has its own set of settings.
+ * @feature_disable: Bitmask of features to disable (override), See feature.c
+ * in brcmfmac for details.
+ * @country_codes: If available, pointer to struct for translating country
+ * codes.
+ * @bus: Bus specific (union) device settings. Currently only
+ * SDIO.
+ */
+struct brcmfmac_pd_device {
+ unsigned int id;
+ unsigned int rev;
+ enum brcmf_bus_type bus_type;
+ unsigned int feature_disable;
+ struct brcmfmac_pd_cc *country_codes;
+ union {
+ struct brcmfmac_sdio_pd sdio;
+ } bus;
+};
+
+/**
+ * struct brcmfmac_platform_data - BRCMFMAC specific platform data.
+ *
+ * @power_on: This function is called by the brcmfmac driver when the module
+ * gets loaded. This can be particularly useful for low power
+ * devices. The platform specific routine may for example decide to
+ * power up the complete device. If there is no use-case for this
+ * function then provide NULL.
+ * @power_off: This function is called by the brcmfmac when the module gets
+ * unloaded. At this point the devices can be powered down or
+ * otherwise be reset. So if an actual power_off is not supported
+ * but reset is supported by the devices then reset the devices
+ * when this function gets called. This can be particularly useful
+ * for low power devices. If there is no use-case for this
+ * function then provide NULL.
+ */
+struct brcmfmac_platform_data {
+ void (*power_on)(void);
+ void (*power_off)(void);
+ char *fw_alternative_path;
+ int device_count;
+ struct brcmfmac_pd_device devices[0];
+};
+
+
+#endif /* _LINUX_BRCMFMAC_PLATFORM_H */
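
Because devices[] is a flexible array, platform code would usually build this data dynamically before registering it under BRCMFMAC_PDATA_NAME. A hedged sketch for one SDIO chip (chip id, revision and drive strength are placeholders; as the comment above notes, a built-in user should register earlier than device_initcall):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac.h>

static int __init example_brcmfmac_pdata_init(void)
{
	struct brcmfmac_platform_data *pdata;
	struct platform_device *pdev;
	size_t size = sizeof(*pdata) + sizeof(pdata->devices[0]);

	pdata = kzalloc(size, GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->device_count = 1;
	pdata->devices[0].bus_type = BRCMF_BUSTYPE_SDIO;
	pdata->devices[0].id = 0x4339;			/* placeholder chip id */
	pdata->devices[0].rev = 1;			/* placeholder revision */
	pdata->devices[0].bus.sdio.drive_strength = 6;	/* placeholder value */

	/* the platform core copies the data, so the local buffer can go */
	pdev = platform_device_register_data(NULL, BRCMFMAC_PDATA_NAME,
					     PLATFORM_DEVID_NONE, pdata, size);
	kfree(pdata);
	return PTR_ERR_OR_ZERO(pdev);
}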
diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h
deleted file mode 100644
index ca13992089b8..000000000000
--- a/include/linux/platform_data/microread.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Driver include for the Inside Secure microread NFC Chip.
- *
- * Copyright (C) 2011 Tieto Poland
- * Copyright (C) 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _MICROREAD_H
-#define _MICROREAD_H
-
-#include <linux/i2c.h>
-
-#define MICROREAD_DRIVER_NAME "microread"
-
-/* board config platform data for microread */
-struct microread_nfc_platform_data {
- unsigned int rst_gpio;
- unsigned int irq_gpio;
- unsigned int ioh_gpio;
-};
-
-#endif /* _MICROREAD_H */
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
index 2a330ec9e2af..d1397c8ed94e 100644
--- a/include/linux/platform_data/mmp_dma.h
+++ b/include/linux/platform_data/mmp_dma.h
@@ -14,6 +14,7 @@
struct mmp_dma_platdata {
int dma_channels;
+ int nb_requestors;
};
#endif /* MMP_DMA_H */
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index 36bb92172f47..c55e42ee57fa 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -40,7 +40,6 @@ struct s3c2410_nand_set {
char *name;
int *nr_map;
struct mtd_partition *partitions;
- struct nand_ecclayout *ecc_layout;
};
struct s3c2410_platform_nand {
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index aed170588b74..698d0d59db76 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -28,6 +28,7 @@ enum ntc_thermistor_type {
TYPE_NCPXXWL333,
TYPE_B57330V2103,
TYPE_NCPXXWF104,
+ TYPE_NCPXXXH103,
};
struct ntc_thermistor_platform_data {
diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h
index 4504d5d592f0..009e1d83fe39 100644
--- a/include/linux/platform_data/sa11x0-serial.h
+++ b/include/linux/platform_data/sa11x0-serial.h
@@ -26,8 +26,12 @@ struct sa1100_port_fns {
void sa1100_register_uart_fns(struct sa1100_port_fns *fns);
void sa1100_register_uart(int idx, int port);
#else
-#define sa1100_register_uart_fns(fns) do { } while (0)
-#define sa1100_register_uart(idx,port) do { } while (0)
+static inline void sa1100_register_uart_fns(struct sa1100_port_fns *fns)
+{
+}
+static inline void sa1100_register_uart(int idx, int port)
+{
+}
#endif
#endif
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h
index d09275f3cde3..2ba2c34ca3d3 100644
--- a/include/linux/platform_data/serial-omap.h
+++ b/include/linux/platform_data/serial-omap.h
@@ -21,7 +21,7 @@
#include <linux/device.h>
#include <linux/pm_qos.h>
-#define DRIVER_NAME "omap_uart"
+#define OMAP_SERIAL_DRIVER_NAME "omap_uart"
/*
* Use tty device name as ttyO, [O -> OMAP]
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 25266c600021..308d6044f153 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -42,7 +42,9 @@ extern int pm_clk_create(struct device *dev);
extern void pm_clk_destroy(struct device *dev);
extern int pm_clk_add(struct device *dev, const char *con_id);
extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
+extern int of_pm_clk_add_clks(struct device *dev);
extern void pm_clk_remove(struct device *dev, const char *con_id);
+extern void pm_clk_remove_clk(struct device *dev, struct clk *clk);
extern int pm_clk_suspend(struct device *dev);
extern int pm_clk_resume(struct device *dev);
#else
@@ -69,11 +71,18 @@ static inline int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
return -EINVAL;
}
+static inline int of_pm_clk_add_clks(struct device *dev)
+{
+ return -EINVAL;
+}
static inline void pm_clk_remove(struct device *dev, const char *con_id)
{
}
#define pm_clk_suspend NULL
#define pm_clk_resume NULL
+static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk)
+{
+}
#endif
#ifdef CONFIG_HAVE_CLK
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index db21d3995f7e..49cd8890b873 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -19,6 +19,8 @@
/* Defines used for the flags field in the struct generic_pm_domain */
#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
+#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */
+
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
GPD_STATE_POWER_OFF, /* PM domain is off */
@@ -37,6 +39,11 @@ struct gpd_dev_ops {
bool (*active_wakeup)(struct device *dev);
};
+struct genpd_power_state {
+ s64 power_off_latency_ns;
+ s64 power_on_latency_ns;
+};
+
struct generic_pm_domain {
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
@@ -54,9 +61,7 @@ struct generic_pm_domain {
unsigned int prepared_count; /* Suspend counter of prepared devices */
bool suspend_power_off; /* Power status before system suspend */
int (*power_off)(struct generic_pm_domain *domain);
- s64 power_off_latency_ns;
int (*power_on)(struct generic_pm_domain *domain);
- s64 power_on_latency_ns;
struct gpd_dev_ops dev_ops;
s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
bool max_off_time_changed;
@@ -66,6 +71,10 @@ struct generic_pm_domain {
void (*detach_dev)(struct generic_pm_domain *domain,
struct device *dev);
unsigned int flags; /* Bit field of configs for genpd */
+ struct genpd_power_state states[GENPD_MAX_NUM_STATES];
+ unsigned int state_count; /* number of states */
+ unsigned int state_idx; /* state that genpd will go to when off */
+
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
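
With the per-state latencies folded into states[], a genpd provider describes its low-power states up front and state_idx selects the one used at power-off. A hedged, static sketch (names and latency numbers are placeholders; a real provider would still pass this through pm_genpd_init()):

static struct generic_pm_domain example_pd = {
	.name = "example-pd",
	.states = {
		[0] = {	/* shallow retention state */
			.power_off_latency_ns = 10000,
			.power_on_latency_ns  = 20000,
		},
		[1] = {	/* full power-off */
			.power_off_latency_ns = 200000,
			.power_on_latency_ns  = 500000,
		},
	},
	.state_count = 2,
};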
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 95403d2ccaf5..cccaf4a29e9f 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -34,6 +34,8 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
int dev_pm_opp_get_opp_count(struct device *dev);
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -60,6 +62,9 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
void dev_pm_opp_put_supported_hw(struct device *dev);
int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
void dev_pm_opp_put_prop_name(struct device *dev);
+int dev_pm_opp_set_regulator(struct device *dev, const char *name);
+void dev_pm_opp_put_regulator(struct device *dev);
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
#else
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
@@ -86,6 +91,16 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
return 0;
}
+static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+ return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+ return 0;
+}
+
static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
return NULL;
@@ -151,6 +166,18 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
+static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+ return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_regulator(struct device *dev) {}
+
+static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_PM_OPP */
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
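
The new regulator hooks let an OPP user attach its supply once and then drive both clock and voltage through a single call. A hedged fragment of a driver probe path (the "vdd" supply name and the DT-backed table are assumptions):

#include <linux/pm_opp.h>

static int example_opp_setup(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_set_regulator(dev, "vdd");	/* supply name assumed */
	if (ret)
		return ret;

	return dev_pm_opp_of_add_table(dev);	/* assumes OPPs come from DT */
}

static int example_set_frequency(struct device *dev, unsigned long freq_hz)
{
	/* orders the clk and regulator changes according to scale direction */
	return dev_pm_opp_set_rate(dev, freq_hz);
}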
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 7c3d11a6b4ad..3ec5309e29f3 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -58,6 +58,11 @@ static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
{
BUG();
}
+
+static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
+{
+ BUG();
+}
#endif
/*
@@ -186,6 +191,20 @@ static inline void clear_pmem(void __pmem *addr, size_t size)
}
/**
+ * invalidate_pmem - flush a pmem range from the cache hierarchy
+ * @addr: virtual start address
+ * @size: bytes to invalidate (internally aligned to cache line size)
+ *
+ * For platforms that support clearing poison this flushes any poisoned
+ * ranges out of the cache
+ */
+static inline void invalidate_pmem(void __pmem *addr, size_t size)
+{
+ if (arch_has_pmem_api())
+ arch_invalidate_pmem(addr, size);
+}
+
+/**
* wb_cache_pmem - write back processor cache for PMEM memory range
* @addr: virtual start address
* @size: number of bytes to write back
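
invalidate_pmem() is the counterpart to wb_cache_pmem() for the clear-poison path: once the media range has been repaired, any stale (possibly poisoned) cached copies must be dropped. Illustrative only, with the surrounding badblocks bookkeeping omitted:

static void example_after_clear_poison(void __pmem *addr, size_t len)
{
	/* no-op on architectures without a pmem API, see arch_has_pmem_api() */
	invalidate_pmem(addr, len);
}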
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 4a27153574e2..51334edec506 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -30,7 +30,11 @@
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
/********** mm/debug-pagealloc.c **********/
+#ifdef CONFIG_PAGE_POISONING_ZERO
+#define PAGE_POISON 0x00
+#else
#define PAGE_POISON 0xaa
+#endif
/********** mm/page_alloc.c ************/
diff --git a/include/linux/poll.h b/include/linux/poll.h
index c08386fb3e08..9fb4f40d9a26 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
ktime_t *expires, unsigned long slack);
-extern long select_estimate_accuracy(struct timespec *tv);
+extern u64 select_estimate_accuracy(struct timespec *tv);
static inline int poll_schedule(struct poll_wqueues *pwq, int state)
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 907f3fd191ac..62d44c176071 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -128,9 +128,6 @@ void posix_cpu_timer_schedule(struct k_itimer *timer);
void run_posix_cpu_timers(struct task_struct *task);
void posix_cpu_timers_exit(struct task_struct *task);
void posix_cpu_timers_exit_group(struct task_struct *task);
-
-bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk);
-
void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
cputime_t *newval, cputime_t *oldval);
diff --git a/include/linux/power/bq24735-charger.h b/include/linux/power/bq24735-charger.h
index f536164a6069..6b750c1a45fa 100644
--- a/include/linux/power/bq24735-charger.h
+++ b/include/linux/power/bq24735-charger.h
@@ -32,6 +32,8 @@ struct bq24735_platform {
int status_gpio_active_low;
bool status_gpio_valid;
+ bool ext_control;
+
char **supplied_to;
size_t num_supplicants;
};
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 998d8f1c3c91..b50c0492629d 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -49,6 +49,7 @@ struct bq27xxx_reg_cache {
struct bq27xxx_device_info {
struct device *dev;
+ int id;
enum bq27xxx_chip chip;
const char *name;
struct bq27xxx_access_methods bus;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index ef9f1592185d..751061790626 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -163,6 +163,9 @@ enum power_supply_type {
POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */
POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */
POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */
+ POWER_SUPPLY_TYPE_USB_TYPE_C, /* Type C Port */
+ POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */
+ POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */
};
enum power_supply_notifier_events {
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 54bf1484d41f..35ac903956c7 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -111,22 +111,17 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
kt->nsec = ts.tv_nsec;
}
-#ifdef CONFIG_NTP_PPS
-
static inline void pps_get_ts(struct pps_event_time *ts)
{
- ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real);
-}
+ struct system_time_snapshot snap;
-#else /* CONFIG_NTP_PPS */
-
-static inline void pps_get_ts(struct pps_event_time *ts)
-{
- ktime_get_real_ts64(&ts->ts_real);
+ ktime_get_snapshot(&snap);
+ ts->ts_real = ktime_to_timespec64(snap.real);
+#ifdef CONFIG_NTP_PPS
+ ts->ts_raw = ktime_to_timespec64(snap.raw);
+#endif
}
-#endif /* CONFIG_NTP_PPS */
-
/* Subtract known time delay from PPS event time(s) */
static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta)
{
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 42dfc615dbf8..de0e7719d4c5 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -9,6 +9,8 @@
struct pid_namespace;
struct nsproxy;
struct path;
+struct task_struct;
+struct inode;
struct proc_ns_operations {
const char *name;
@@ -24,6 +26,7 @@ extern const struct proc_ns_operations ipcns_operations;
extern const struct proc_ns_operations pidns_operations;
extern const struct proc_ns_operations userns_operations;
extern const struct proc_ns_operations mntns_operations;
+extern const struct proc_ns_operations cgroupns_operations;
/*
* We always define these enumerators
@@ -34,6 +37,7 @@ enum {
PROC_UTS_INIT_INO = 0xEFFFFFFEU,
PROC_USER_INIT_INO = 0xEFFFFFFDU,
PROC_PID_INIT_INO = 0xEFFFFFFCU,
+ PROC_CGROUP_INIT_INO = 0xEFFFFFFBU,
};
#ifdef CONFIG_PROC_FS
diff --git a/include/linux/psci.h b/include/linux/psci.h
index 12c4865457ad..393efe2edf9a 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -24,6 +24,9 @@ bool psci_tos_resident_on(int cpu);
bool psci_power_state_loses_context(u32 state);
bool psci_power_state_is_valid(u32 state);
+int psci_cpu_init_idle(unsigned int cpu);
+int psci_cpu_suspend_enter(unsigned long index);
+
struct psci_operations {
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9c9d6c154c8e..4660aaa3195e 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -76,7 +76,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
struct ramoops_platform_data {
unsigned long mem_size;
- unsigned long mem_address;
+ phys_addr_t mem_address;
unsigned int mem_type;
unsigned long record_size;
unsigned long console_size;
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index b8b73066d137..6b15e168148a 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -38,6 +38,7 @@ struct ptp_clock_request {
};
};
+struct system_device_crosststamp;
/**
 * struct ptp_clock_info - describes a PTP hardware clock
*
@@ -67,6 +68,11 @@ struct ptp_clock_request {
* @gettime64: Reads the current time from the hardware clock.
* parameter ts: Holds the result.
*
+ * @getcrosststamp: Reads the current time from the hardware clock and
+ * system clock simultaneously.
+ * parameter cts: Contains timestamp (device,system) pair,
+ * where system time is realtime and monotonic.
+ *
* @settime64: Set the current time on the hardware clock.
* parameter ts: Time value to set.
*
@@ -105,6 +111,8 @@ struct ptp_clock_info {
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*getcrosststamp)(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts);
int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
int (*enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index c2f2574ff61c..2a097d176ba9 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -197,6 +197,7 @@ enum pxa_ssp_type {
QUARK_X1000_SSP,
LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
LPSS_BYT_SSP,
+ LPSS_BSW_SSP,
LPSS_SPT_SSP,
LPSS_BXT_SSP,
};
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 1d1ba2c5ee7a..53ecb37ae563 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -11,9 +11,11 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+#define X_FINAL_CLEANUP_AGG_INT 1
+
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 4
-#define FW_REVISION_VERSION 2
+#define FW_MINOR_VERSION 7
+#define FW_REVISION_VERSION 3
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -152,6 +154,9 @@
/* number of queues in a PF queue group */
#define QM_PF_QUEUE_GROUP_SIZE 8
+/* the size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE 4
+
/* base number of Tx PQs in the CM PQ representation.
* should be used when storing PQ IDs in CM PQ registers and context
*/
@@ -285,6 +290,16 @@
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+#define SDM_COMP_TYPE_NONE 0
+#define SDM_COMP_TYPE_WAKE_THREAD 1
+#define SDM_COMP_TYPE_AGG_INT 2
+#define SDM_COMP_TYPE_CM 3
+#define SDM_COMP_TYPE_LOADER 4
+#define SDM_COMP_TYPE_PXP 5
+#define SDM_COMP_TYPE_INDICATE_ERROR 6
+#define SDM_COMP_TYPE_RELEASE_THREAD 7
+#define SDM_COMP_TYPE_RAM 8
+
/******************/
/* PBF CONSTANTS */
/******************/
@@ -335,7 +350,7 @@ struct event_ring_entry {
/* Multi function mode */
enum mf_mode {
- SF,
+ ERROR_MODE /* Unsupported mode */,
MF_OVLAN,
MF_NPAR,
MAX_MF_MODE
@@ -606,4 +621,19 @@ struct status_block {
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
+struct tunnel_parsing_flags {
+ u8 flags;
+#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
+#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+};
#endif /* __COMMON_HSI__ */
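The new tunnel_parsing_flags byte packs its fields behind the MASK/SHIFT pairs above. A sketch of extracting two of them with plain shifts (the qed driver may use its own GET_FIELD-style helper instead; these wrappers are illustrative only):

static inline u8 foo_tunnel_type(const struct tunnel_parsing_flags *f)
{
	return (f->flags >> TUNNEL_PARSING_FLAGS_TYPE_SHIFT) &
	       TUNNEL_PARSING_FLAGS_TYPE_MASK;
}

static inline bool foo_tunnel_ipv4_fragment(const struct tunnel_parsing_flags *f)
{
	return (f->flags >> TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT) &
	       TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK;
}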
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 320b3373ac1d..092cb0c1afcb 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -17,10 +17,8 @@
#define ETH_MAX_RAMROD_PER_CON 8
#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
-#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
-#define ETH_RX_NUM_NEXT_PAGE_SGES 2
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
@@ -34,7 +32,8 @@
#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
-#define ETH_REG_CQE_PBL_SIZE 3
+/* Maximum number of buffers, used for RX packet placement */
+#define ETH_RX_MAX_BUFF_PER_PKT 5
/* num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
@@ -54,9 +53,9 @@
/* TPA constants */
#define ETH_TPA_MAX_AGGS_NUM 64
-#define ETH_TPA_CQE_START_SGL_SIZE 3
-#define ETH_TPA_CQE_CONT_SGL_SIZE 6
-#define ETH_TPA_CQE_END_SGL_SIZE 4
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Queue Zone sizes */
#define TSTORM_QZONE_SIZE 0
@@ -74,18 +73,18 @@ struct coalescing_timeset {
struct eth_tx_1st_bd_flags {
u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4
-#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
@@ -97,38 +96,44 @@ struct eth_tx_data_1st_bd {
__le16 vlan;
u8 nbds;
struct eth_tx_1st_bd_flags bd_flags;
- __le16 fw_use_only;
+ __le16 bitfields;
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2
};
/* The parsing information data for the second tx bd of a given packet. */
struct eth_tx_data_2nd_bd {
__le16 tunn_ip_size;
- __le16 bitfields;
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
-#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
- __le16 bitfields2;
+ __le16 bitfields1;
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14
-#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
+ __le16 bitfields2;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
/* Regular ETH Rx FP CQE. */
@@ -145,11 +150,68 @@ struct eth_fast_path_rx_reg_cqe {
struct parsing_and_err_flags pars_flags;
__le16 vlan_tag;
__le32 rss_hash;
- __le16 len_on_bd;
+ __le16 len_on_first_bd;
u8 placement_offset;
- u8 reserved;
- __le16 pbl[ETH_REG_CQE_PBL_SIZE];
- u8 reserved1[10];
+ struct tunnel_parsing_flags tunnel_pars_flags;
+ u8 bd_num;
+ u8 reserved[7];
+ u32 fw_debug;
+ u8 reserved1[3];
+ u8 flags;
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
+};
+
+/* TPA-continue ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_cont_cqe {
+ u8 type;
+ u8 tpa_agg_index;
+ __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved[5];
+ u8 reserved1;
+ __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+};
+
+/* TPA-end ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_end_cqe {
+ u8 type;
+ u8 tpa_agg_index;
+ __le16 total_packet_len;
+ u8 num_of_bds;
+ u8 end_reason;
+ __le16 num_of_coalesced_segs;
+ __le32 ts_delta;
+ __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ u8 reserved1[3];
+ u8 reserved2;
+ __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+};
+
+/* TPA-start ETH Rx FP CQE. */
+struct eth_fast_path_rx_tpa_start_cqe {
+ u8 type;
+ u8 bitfields;
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
+ __le16 seg_len;
+ struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag;
+ __le32 rss_hash;
+ __le16 len_on_first_bd;
+ u8 placement_offset;
+ struct tunnel_parsing_flags tunnel_pars_flags;
+ u8 tpa_agg_index;
+ u8 header_len;
+ __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+ u32 fw_debug;
};
/* The L4 pseudo checksum mode for Ethernet */
@@ -168,13 +230,26 @@ struct eth_slow_path_rx_cqe {
u8 type;
u8 ramrod_cmd_id;
u8 error_flag;
- u8 reserved[27];
+ u8 reserved[25];
__le16 echo;
+ u8 reserved1;
+ u8 flags;
+/* for PMD mode - valid indication */
+#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
+/* for PMD mode - valid toggle indication */
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
};
/* union for all ETH Rx CQE types */
union eth_rx_cqe {
struct eth_fast_path_rx_reg_cqe fast_path_regular;
+ struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+ struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+ struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
struct eth_slow_path_rx_cqe slow_path;
};
@@ -183,15 +258,18 @@ enum eth_rx_cqe_type {
ETH_RX_CQE_TYPE_UNUSED,
ETH_RX_CQE_TYPE_REGULAR,
ETH_RX_CQE_TYPE_SLOW_PATH,
+ ETH_RX_CQE_TYPE_TPA_START,
+ ETH_RX_CQE_TYPE_TPA_CONT,
+ ETH_RX_CQE_TYPE_TPA_END,
MAX_ETH_RX_CQE_TYPE
};
/* ETH Rx producers data */
struct eth_rx_prod_data {
__le16 bd_prod;
- __le16 sge_prod;
__le16 cqe_prod;
__le16 reserved;
+ __le16 reserved1;
};
/* The first tx bd of a given packet */
@@ -211,12 +289,17 @@ struct eth_tx_2nd_bd {
/* The parsing information data for the third tx bd of a given packet. */
struct eth_tx_data_3rd_bd {
__le16 lso_mss;
- u8 bitfields;
+ __le16 bitfields;
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
- u8 resereved0[3];
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
+ u8 tunn_l4_hdr_start_offset_w;
+ u8 tunn_hdr_size_w;
};
/* The third tx bd of a given packet */
@@ -226,12 +309,24 @@ struct eth_tx_3rd_bd {
struct eth_tx_data_3rd_bd data;
};
+/* Complementary information for the regular tx bd of a given packet. */
+struct eth_tx_data_bd {
+ __le16 reserved0;
+ __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+#define ETH_TX_DATA_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+ __le16 reserved3;
+};
+
/* The common non-special TX BD ring element */
struct eth_tx_bd {
struct regpair addr;
__le16 nbytes;
- __le16 reserved0;
- __le32 reserved1;
+ struct eth_tx_data_bd data;
};
union eth_tx_bd_types {
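Note that the START_BD flag now sits in bit 0 of the first-BD flags, with the remaining bits shifted up by one. A hedged sketch of composing the byte from the shifted constants (a real driver would likely use its own field-setting helpers):

static void foo_fill_1st_bd_flags(struct eth_tx_1st_bd_flags *fl,
				  bool l4_csum, bool lso)
{
	u8 val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;	/* first BD */

	if (l4_csum)
		val |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
	if (lso)
		val |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
	fl->bitfields = val;
}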
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 41b9049b57e2..5f8fcaaa6504 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -19,6 +19,10 @@
/* dma_addr_t manip */
#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
+#define DMA_REGPAIR_LE(x, val) do { \
+ (x).hi = DMA_HI_LE((val)); \
+ (x).lo = DMA_LO_LE((val)); \
+ } while (0)
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
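DMA_REGPAIR_LE() fills both halves of a little-endian hi/lo pair from a dma_addr_t in one statement; a sketch assuming the regpair type from common_hsi.h:

static void foo_set_pbl_addr(struct regpair *dst, dma_addr_t phys)
{
	/* dst->hi/lo receive the upper/lower 32 bits of phys as __le32 */
	DMA_REGPAIR_LE(*dst, phys);
}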
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 81ab178e31c1..e1d69834a11f 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -33,10 +33,20 @@ struct qed_update_vport_params {
u8 vport_id;
u8 update_vport_active_flg;
u8 vport_active_flg;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
u8 update_rss_flg;
struct qed_update_vport_rss_params rss_params;
};
+struct qed_start_vport_params {
+ bool remove_inner_vlan;
+ bool gro_enable;
+ bool drop_ttl0;
+ u8 vport_id;
+ u16 mtu;
+};
+
struct qed_stop_rxq_params {
u8 rss_id;
u8 rx_queue_id;
@@ -116,9 +126,7 @@ struct qed_eth_ops {
void *cookie);
int (*vport_start)(struct qed_dev *cdev,
- u8 vport_id, u16 mtu,
- u8 drop_ttl0_flg,
- u8 inner_vlan_removal_en_flg);
+ struct qed_start_vport_params *params);
int (*vport_stop)(struct qed_dev *cdev,
u8 vport_id);
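vport_start now takes a parameter block instead of a growing argument list. A sketch of a caller filling it in (the qed_ops, cdev and chosen values are assumptions about the upper-layer driver):

	struct qed_start_vport_params start = {
		.vport_id		= 0,
		.mtu			= 1500,
		.drop_ttl0		= true,
		.remove_inner_vlan	= true,
		.gro_enable		= false,
	};
	int rc;

	rc = qed_ops->vport_start(cdev, &start);
	if (rc)
		return rc;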
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index d4a32e878180..1f7599c77cd4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -80,7 +80,7 @@ struct qed_dev_info {
u8 num_hwfns;
u8 hw_mac[ETH_ALEN];
- bool is_mf;
+ bool is_mf_default;
/* FW version */
u16 fw_major;
@@ -360,6 +360,12 @@ enum DP_MODULE {
/* to be added...up to 0x8000000 */
};
+enum qed_mf_mode {
+ QED_MF_DEFAULT,
+ QED_MF_OVLAN,
+ QED_MF_NPAR,
+};
+
struct qed_eth_stats {
u64 no_buff_discards;
u64 packet_too_big_discard;
@@ -440,6 +446,12 @@ struct qed_eth_stats {
#define RX_PI 0
#define TX_PI(tc) (RX_PI + 1 + tc)
+struct qed_sb_cnt_info {
+ int sb_cnt;
+ int sb_iov_cnt;
+ int sb_free_blk;
+};
+
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
u32 prod = 0;
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
index bd466439c588..3bdfa70bc642 100644
--- a/include/linux/quicklist.h
+++ b/include/linux/quicklist.h
@@ -5,7 +5,7 @@
* as needed after allocation when they are freed. Per cpu lists of pages
* are kept that only contain node local pages.
*
- * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com>
+ * (C) 2007, SGI. Christoph Lameter <cl@linux.com>
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
diff --git a/include/linux/quota.h b/include/linux/quota.h
index b2505acfd3c0..9dfb6bce8c9e 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -306,6 +306,7 @@ struct quota_format_ops {
int (*read_dqblk)(struct dquot *dquot); /* Read structure for one user */
int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */
int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */
+ int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structure in the quota file */
};
/* Operations working with dquots */
@@ -321,6 +322,8 @@ struct dquot_operations {
* quota code only */
qsize_t *(*get_reserved_space) (struct inode *);
int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */
+ /* Get next ID with active quota structure */
+ int (*get_next_id) (struct super_block *sb, struct kqid *qid);
};
struct path;
@@ -425,6 +428,8 @@ struct quotactl_ops {
int (*quota_sync)(struct super_block *, int);
int (*set_info)(struct super_block *, int, struct qc_info *);
int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+ int (*get_nextdqblk)(struct super_block *, struct kqid *,
+ struct qc_dqblk *);
int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
int (*get_state)(struct super_block *, struct qc_state *);
int (*rm_xquota)(struct super_block *, unsigned int);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 7a57c28eb5e7..f00fa86ac966 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -82,6 +82,7 @@ int dquot_commit(struct dquot *dquot);
int dquot_acquire(struct dquot *dquot);
int dquot_release(struct dquot *dquot);
int dquot_commit_info(struct super_block *sb, int type);
+int dquot_get_next_id(struct super_block *sb, struct kqid *qid);
int dquot_mark_dquot_dirty(struct dquot *dquot);
int dquot_file_open(struct inode *inode, struct file *file);
@@ -99,6 +100,8 @@ int dquot_get_state(struct super_block *sb, struct qc_state *state);
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii);
int dquot_get_dqblk(struct super_block *sb, struct kqid id,
struct qc_dqblk *di);
+int dquot_get_next_dqblk(struct super_block *sb, struct kqid *id,
+ struct qc_dqblk *di);
int dquot_set_dqblk(struct super_block *sb, struct kqid id,
struct qc_dqblk *di);
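A filesystem using the generic dquot format can expose the new next-ID lookups by pointing its operation tables at the helpers declared above; a sketch for a hypothetical foo filesystem that otherwise relies on the stock dquot callbacks:

static const struct dquot_operations foo_quota_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.get_next_id	= dquot_get_next_id,		/* Q_GETNEXTQUOTA */
};

static const struct quotactl_ops foo_quotactl_ops = {
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,		/* new */
	.set_dqblk	= dquot_set_dqblk,
	.get_state	= dquot_get_state,
};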
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index f54be7082207..51a97ac8bfbf 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -21,6 +21,7 @@
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H
+#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
@@ -270,8 +271,15 @@ static inline void radix_tree_replace_slot(void **pslot, void *item)
}
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
- struct radix_tree_node **nodep, void ***slotp);
-int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
+ unsigned order, struct radix_tree_node **nodep,
+ void ***slotp);
+int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
+ unsigned order, void *);
+static inline int radix_tree_insert(struct radix_tree_root *root,
+ unsigned long index, void *entry)
+{
+ return __radix_tree_insert(root, index, 0, entry);
+}
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
@@ -395,6 +403,22 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter)
}
/**
+ * radix_tree_iter_next - resume iterating when the chunk may be invalid
+ * @iter: iterator state
+ *
+ * If the iterator needs to release then reacquire a lock, the chunk may
+ * have been invalidated by an insertion or deletion. Call this function
+ * to continue the iteration from the next index.
+ */
+static inline __must_check
+void **radix_tree_iter_next(struct radix_tree_iter *iter)
+{
+ iter->next_index = iter->index + 1;
+ iter->tags = 0;
+ return NULL;
+}
+
+/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
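The intended pattern for radix_tree_iter_next(): when the walk must give up the lock (or RCU read section) inside the loop body, reset the iterator first so the next pass starts a fresh chunk. A sketch, with foo_process() standing in for real per-slot work:

static void foo_scan(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		foo_process(radix_tree_deref_slot(slot));

		if (need_resched()) {
			/* the current chunk may be invalidated while away */
			slot = radix_tree_iter_next(&iter);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();
}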
diff --git a/include/linux/random.h b/include/linux/random.h
index a75840c1aa71..9c29122037f9 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -34,6 +34,7 @@ extern const struct file_operations random_fops, urandom_fops;
#endif
unsigned int get_random_int(void);
+unsigned long get_random_long(void);
unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
u32 prandom_u32(void);
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 14ec1652daf4..17d4f849c65e 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -319,6 +319,27 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
})
/**
+ * list_next_or_null_rcu - get the next element from a list
+ * @head: the head for the list.
+ * @ptr: the list head to take the next element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the ptr is at the end of the list, NULL is returned.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_next_or_null_rcu(head, ptr, type, member) \
+({ \
+ struct list_head *__head = (head); \
+ struct list_head *__ptr = (ptr); \
+ struct list_head *__next = READ_ONCE(__ptr->next); \
+ likely(__next != __head) ? list_entry_rcu(__next, type, \
+ member) : NULL; \
+})
+
+/**
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
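A usage sketch for list_next_or_null_rcu(), with a hypothetical struct foo embedding the list_head; the lookup must run under rcu_read_lock():

struct foo {
	struct list_head list;
	int val;
};

static void foo_print_next(struct list_head *foo_list, struct foo *cur)
{
	struct foo *next;

	rcu_read_lock();
	next = list_next_or_null_rcu(foo_list, &cur->list, struct foo, list);
	if (next)
		pr_info("next val: %d\n", next->val);
	rcu_read_unlock();
}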
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 14e6f47ee16f..2657aff2725b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -332,9 +332,7 @@ void rcu_init(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
-struct notifier_block;
-int rcu_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu);
+void rcu_report_dead(unsigned int cpu);
#ifndef CONFIG_TINY_RCU
void rcu_end_inkernel_boot(void);
@@ -360,8 +358,6 @@ void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
-static inline void rcu_user_hooks_switch(struct task_struct *prev,
- struct task_struct *next) { }
#endif /* CONFIG_NO_HZ_FULL */
#ifdef CONFIG_RCU_NOCB_CPU
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 18394343f489..3dc08ce15426 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -65,6 +65,36 @@ struct reg_sequence {
unsigned int delay_us;
};
+#define regmap_update_bits(map, reg, mask, val) \
+ regmap_update_bits_base(map, reg, mask, val, NULL, false, false)
+#define regmap_update_bits_async(map, reg, mask, val)\
+ regmap_update_bits_base(map, reg, mask, val, NULL, true, false)
+#define regmap_update_bits_check(map, reg, mask, val, change)\
+ regmap_update_bits_base(map, reg, mask, val, change, false, false)
+#define regmap_update_bits_check_async(map, reg, mask, val, change)\
+ regmap_update_bits_base(map, reg, mask, val, change, true, false)
+
+#define regmap_write_bits(map, reg, mask, val) \
+ regmap_update_bits_base(map, reg, mask, val, NULL, false, true)
+
+#define regmap_field_write(field, val) \
+ regmap_field_update_bits_base(field, ~0, val, NULL, false, false)
+#define regmap_field_force_write(field, val) \
+ regmap_field_update_bits_base(field, ~0, val, NULL, false, true)
+#define regmap_field_update_bits(field, mask, val)\
+ regmap_field_update_bits_base(field, mask, val, NULL, false, false)
+#define regmap_field_force_update_bits(field, mask, val) \
+ regmap_field_update_bits_base(field, mask, val, NULL, false, true)
+
+#define regmap_fields_write(field, id, val) \
+ regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false)
+#define regmap_fields_force_write(field, id, val) \
+ regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true)
+#define regmap_fields_update_bits(field, id, mask, val)\
+ regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false)
+#define regmap_fields_force_update_bits(field, id, mask, val) \
+ regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true)
+
#ifdef CONFIG_REGMAP
enum regmap_endian {
@@ -162,7 +192,7 @@ typedef void (*regmap_unlock)(void *);
 * This field is a duplicate of a similar field in
 * 'struct regmap_bus' and serves the exact same purpose.
* Use it only for "no-bus" cases.
- * @max_register: Optional, specifies the maximum valid register index.
+ * @max_register: Optional, specifies the maximum valid register address.
* @wr_table: Optional, points to a struct regmap_access_table specifying
* valid ranges for write access.
* @rd_table: As above, for read access.
@@ -691,18 +721,9 @@ int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
-int regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val);
-int regmap_write_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val);
-int regmap_update_bits_async(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val);
-int regmap_update_bits_check(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change);
-int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change);
+int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
int regmap_get_val_bytes(struct regmap *map);
int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
@@ -770,18 +791,14 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev,
void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
int regmap_field_read(struct regmap_field *field, unsigned int *val);
-int regmap_field_write(struct regmap_field *field, unsigned int val);
-int regmap_field_update_bits(struct regmap_field *field,
- unsigned int mask, unsigned int val);
-
-int regmap_fields_write(struct regmap_field *field, unsigned int id,
- unsigned int val);
-int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
- unsigned int val);
+int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
int regmap_fields_read(struct regmap_field *field, unsigned int id,
unsigned int *val);
-int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
- unsigned int mask, unsigned int val);
+int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
/**
* Description of an IRQ for the generic regmap irq_chip.
@@ -868,6 +885,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data);
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
+
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+ struct regmap_irq_chip_data *data);
+
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq);
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data);
@@ -937,42 +962,26 @@ static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
return -EINVAL;
}
-static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- WARN_ONCE(1, "regmap API is disabled");
- return -EINVAL;
-}
-
-static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- WARN_ONCE(1, "regmap API is disabled");
- return -EINVAL;
-}
-
-static inline int regmap_update_bits_async(struct regmap *map,
- unsigned int reg,
- unsigned int mask, unsigned int val)
+static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
WARN_ONCE(1, "regmap API is disabled");
return -EINVAL;
}
-static inline int regmap_update_bits_check(struct regmap *map,
- unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change)
+static inline int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
WARN_ONCE(1, "regmap API is disabled");
return -EINVAL;
}
-static inline int regmap_update_bits_check_async(struct regmap *map,
- unsigned int reg,
- unsigned int mask,
- unsigned int val,
- bool *change)
+static inline int regmap_fields_update_bits_base(struct regmap_field *field,
+ unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
WARN_ONCE(1, "regmap API is disabled");
return -EINVAL;
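For callers nothing changes: the former functions are now macros over the two *_update_bits_base() entry points, so existing call sites compile as before while new code can reach the async/force/change knobs directly. A sketch with invented register numbers:

static int foo_setup(struct regmap *map)
{
	bool changed;
	int ret;

	ret = regmap_update_bits(map, 0x10, 0x0f, 0x03);
	if (ret)
		return ret;

	/* same as regmap_update_bits_base(map, 0x10, 0x0f, 0x03,
	 *                                 &changed, false, false) */
	ret = regmap_update_bits_check(map, 0x10, 0x0f, 0x03, &changed);
	if (ret)
		return ret;

	/* force the write even if the register already holds the value */
	return regmap_write_bits(map, 0x10, 0x0f, 0x03);
}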
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h
index 15fa8f2d35c9..2eb386017fa5 100644
--- a/include/linux/regulator/act8865.h
+++ b/include/linux/regulator/act8865.h
@@ -68,12 +68,12 @@ enum {
* act8865_regulator_data - regulator data
* @id: regulator id
* @name: regulator name
- * @platform_data: regulator init data
+ * @init_data: regulator init data
*/
struct act8865_regulator_data {
int id;
const char *name;
- struct regulator_init_data *platform_data;
+ struct regulator_init_data *init_data;
};
/**
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 16ac9e108806..cd271e89a7e6 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -93,6 +93,8 @@ struct regulator_linear_range {
* @get_current_limit: Get the configured limit for a current-limited regulator.
* @set_input_current_limit: Configure an input limit.
*
+ * @set_active_discharge: Set active discharge enable/disable of regulators.
+ *
* @set_mode: Set the configured operating mode for the regulator.
* @get_mode: Get the configured operating mode for the regulator.
* @get_status: Return actual (not as-configured) status of regulator, as a
@@ -149,6 +151,7 @@ struct regulator_ops {
int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
int (*set_over_current_protection) (struct regulator_dev *);
+ int (*set_active_discharge) (struct regulator_dev *, bool enable);
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
@@ -266,6 +269,14 @@ enum regulator_type {
* @bypass_mask: Mask for control when using regmap set_bypass
* @bypass_val_on: Enabling value for control when using regmap set_bypass
* @bypass_val_off: Disabling value for control when using regmap set_bypass
+ * @active_discharge_off: Disabling value for control when using regmap
+ * set_active_discharge
+ * @active_discharge_on: Enabling value for control when using regmap
+ * set_active_discharge
+ * @active_discharge_mask: Mask for control when using regmap
+ * set_active_discharge
+ * @active_discharge_reg: Register for control when using regmap
+ * set_active_discharge
*
* @enable_time: Time taken for initial enable of regulator (in uS).
* @off_on_delay: guard time (in uS), before re-enabling a regulator
@@ -315,6 +326,10 @@ struct regulator_desc {
unsigned int bypass_mask;
unsigned int bypass_val_on;
unsigned int bypass_val_off;
+ unsigned int active_discharge_on;
+ unsigned int active_discharge_off;
+ unsigned int active_discharge_mask;
+ unsigned int active_discharge_reg;
unsigned int enable_time;
@@ -447,6 +462,8 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable);
int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable);
+int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
+ bool enable);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
#endif
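A regmap-backed regulator driver describes its active-discharge control in the descriptor and points the op at the new generic helper. A sketch with invented register and bit values:

static const struct regulator_ops foo_ldo_ops = {
	/* enable/disable/voltage ops omitted */
	.set_active_discharge	= regulator_set_active_discharge_regmap,
};

static const struct regulator_desc foo_ldo_desc = {
	.name			= "foo-ldo1",
	.ops			= &foo_ldo_ops,
	.type			= REGULATOR_VOLTAGE,
	.owner			= THIS_MODULE,
	.active_discharge_reg	= 0x41,		/* invented */
	.active_discharge_mask	= BIT(3),
	.active_discharge_on	= BIT(3),
	.active_discharge_off	= 0,
};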
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
index 132e05c46661..6029279f4eed 100644
--- a/include/linux/regulator/lp872x.h
+++ b/include/linux/regulator/lp872x.h
@@ -18,6 +18,9 @@
#define LP872X_MAX_REGULATORS 9
+#define LP8720_ENABLE_DELAY 200
+#define LP8725_ENABLE_DELAY 30000
+
enum lp872x_regulator_id {
LP8720_ID_BASE,
LP8720_ID_LDO1 = LP8720_ID_BASE,
@@ -79,12 +82,14 @@ struct lp872x_regulator_data {
* @update_config : if LP872X_GENERAL_CFG register is updated, set true
* @regulator_data : platform regulator id and init data
* @dvs : dvs data for buck voltage control
+ * @enable_gpio : gpio pin number for enable control
*/
struct lp872x_platform_data {
u8 general_config;
bool update_config;
struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
struct lp872x_dvs *dvs;
+ int enable_gpio;
};
#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index a1067d0b3991..5d627c83a630 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -42,6 +42,13 @@ struct regulator;
#define REGULATOR_CHANGE_DRMS 0x10
#define REGULATOR_CHANGE_BYPASS 0x20
+/* Regulator active discharge flags */
+enum regulator_active_discharge {
+ REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
+ REGULATOR_ACTIVE_DISCHARGE_DISABLE,
+ REGULATOR_ACTIVE_DISCHARGE_ENABLE,
+};
+
/**
* struct regulator_state - regulator state during low power system states
*
@@ -100,6 +107,9 @@ struct regulator_state {
* @initial_state: Suspend state to set by default.
* @initial_mode: Mode to set at startup.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @active_discharge: Enable/disable active discharge. The enum
+ * regulator_active_discharge values are used for
+ * initialisation.
* @enable_time: Turn-on time of the rails (unit: microseconds)
*/
struct regulation_constraints {
@@ -140,6 +150,8 @@ struct regulation_constraints {
unsigned int ramp_delay;
unsigned int enable_time;
+ unsigned int active_discharge;
+
/* constraint flags */
unsigned always_on:1; /* regulator never off when system is on */
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
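Machine constraints then select the behaviour per regulator; a short sketch against a hypothetical foo_init_data:

	foo_init_data.constraints.active_discharge =
				REGULATOR_ACTIVE_DISCHARGE_ENABLE;
	/* leave it at REGULATOR_ACTIVE_DISCHARGE_DEFAULT to keep the
	 * hardware or bootloader setting untouched */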
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index ce6b962ffed4..a3a5bcdb1d02 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -38,7 +38,7 @@ struct of_phandle_args;
* @nr_resets: number of reset controls in this reset controller device
*/
struct reset_controller_dev {
- struct reset_control_ops *ops;
+ const struct reset_control_ops *ops;
struct module *owner;
struct list_head list;
struct device_node *of_node;
diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h
deleted file mode 100644
index 20bcb55498cd..000000000000
--- a/include/linux/rfkill-gpio.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2011, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-
-#ifndef __RFKILL_GPIO_H
-#define __RFKILL_GPIO_H
-
-#include <linux/types.h>
-#include <linux/rfkill.h>
-
-/**
- * struct rfkill_gpio_platform_data - platform data for rfkill gpio device.
- * for unused gpio's, the expected value is -1.
- * @name: name for the gpio rf kill instance
- */
-
-struct rfkill_gpio_platform_data {
- char *name;
- enum rfkill_type type;
-};
-
-#endif /* __RFKILL_GPIO_H */
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index d9010789b4e8..e6a0031d1b1f 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -104,7 +104,8 @@ int __must_check rfkill_register(struct rfkill *rfkill);
*
* Pause polling -- say transmitter is off for other reasons.
* NOTE: not necessary for suspend/resume -- in that case the
- * core stops polling anyway
+ * core stops polling anyway (but will also correctly handle
+ * the case of polling having been paused before suspend.)
*/
void rfkill_pause_polling(struct rfkill *rfkill);
@@ -212,6 +213,15 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
* @rfkill: rfkill struct to query
*/
bool rfkill_blocked(struct rfkill *rfkill);
+
+/**
+ * rfkill_find_type - Helper for finding rfkill type by name
+ * @name: the name of the type
+ *
+ * Returns the enum rfkill_type that corresponds to the name.
+ */
+enum rfkill_type rfkill_find_type(const char *name);
+
#else /* !RFKILL */
static inline struct rfkill * __must_check
rfkill_alloc(const char *name,
@@ -268,6 +278,12 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
{
return false;
}
+
+static inline enum rfkill_type rfkill_find_type(const char *name)
+{
+ return RFKILL_TYPE_ALL;
+}
+
#endif /* RFKILL || RFKILL_MODULE */
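rfkill_find_type() resolves a type name to the enum; note that the !RFKILL stub above simply returns RFKILL_TYPE_ALL. A sketch of using it when allocating an rfkill instance (the "bluetooth" name, foo_rfkill_ops, dev and priv are assumptions):

	enum rfkill_type type = rfkill_find_type("bluetooth");
	struct rfkill *rfkill;

	rfkill = rfkill_alloc("foo-bt", dev, type, &foo_rfkill_ops, priv);
	if (!rfkill)
		return -ENOMEM;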
diff --git a/include/linux/rio.h b/include/linux/rio.h
index cde976e86b48..aa2323893e8d 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -137,6 +137,13 @@ struct rio_switch_ops {
int (*em_handle) (struct rio_dev *dev, u8 swport);
};
+enum rio_device_state {
+ RIO_DEVICE_INITIALIZING,
+ RIO_DEVICE_RUNNING,
+ RIO_DEVICE_GONE,
+ RIO_DEVICE_SHUTDOWN,
+};
+
/**
* struct rio_dev - RIO device info
* @global_list: Node in list of all RIO devices
@@ -165,6 +172,7 @@ struct rio_switch_ops {
* @destid: Network destination ID (or associated destid for switch)
* @hopcount: Hopcount to this device
* @prev: Previous RIO device connected to the current one
+ * @state: device state
* @rswitch: struct rio_switch (if valid for this device)
*/
struct rio_dev {
@@ -194,6 +202,7 @@ struct rio_dev {
u16 destid;
u8 hopcount;
struct rio_dev *prev;
+ atomic_t state;
struct rio_switch rswitch[0]; /* RIO switch info */
};
@@ -202,6 +211,7 @@ struct rio_dev {
#define to_rio_dev(n) container_of(n, struct rio_dev, dev)
#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0])
#define to_rio_mport(n) container_of(n, struct rio_mport, dev)
+#define to_rio_net(n) container_of(n, struct rio_net, dev)
/**
* struct rio_msg - RIO message event
@@ -235,8 +245,11 @@ enum rio_phy_type {
/**
* struct rio_mport - RIO master port info
* @dbells: List of doorbell events
+ * @pwrites: List of portwrite events
* @node: Node in global list of master ports
* @nnode: Node in network list of master ports
+ * @net: RIO net this mport is attached to
+ * @lock: lock to synchronize lists manipulations
* @iores: I/O mem resource that this master port interface owns
* @riores: RIO resources that this master port interfaces owns
* @inb_msg: RIO inbound message event descriptors
@@ -253,11 +266,16 @@ enum rio_phy_type {
* @priv: Master port private data
* @dma: DMA device associated with mport
* @nscan: RapidIO network enumeration/discovery operations
+ * @state: mport device state
+ * @pwe_refcnt: port-write enable ref counter to track enable/disable requests
*/
struct rio_mport {
struct list_head dbells; /* list of doorbell events */
+ struct list_head pwrites; /* list of portwrite events */
struct list_head node; /* node in global list of ports */
struct list_head nnode; /* node in net list of ports */
+ struct rio_net *net; /* RIO net this mport is attached to */
+ struct mutex lock;
struct resource iores;
struct resource riores[RIO_MAX_MPORT_RESOURCES];
struct rio_msg inb_msg[RIO_MAX_MBOX];
@@ -280,20 +298,20 @@ struct rio_mport {
struct dma_device dma;
#endif
struct rio_scan *nscan;
+ atomic_t state;
+ unsigned int pwe_refcnt;
};
+static inline int rio_mport_is_running(struct rio_mport *mport)
+{
+ return atomic_read(&mport->state) == RIO_DEVICE_RUNNING;
+}
+
/*
* Enumeration/discovery control flags
*/
#define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */
-struct rio_id_table {
- u16 start; /* logical minimal id */
- u32 max; /* max number of IDs in table */
- spinlock_t lock;
- unsigned long *table;
-};
-
/**
* struct rio_net - RIO network info
* @node: Node in global list of RIO networks
@@ -302,7 +320,9 @@ struct rio_id_table {
* @mports: List of master ports accessing this network
* @hport: Default port for accessing this network
* @id: RIO network ID
- * @destid_table: destID allocation table
+ * @dev: Device object
+ * @enum_data: private data specific to a network enumerator
+ * @release: enumerator-specific release callback
*/
struct rio_net {
struct list_head node; /* node in list of networks */
@@ -311,7 +331,53 @@ struct rio_net {
struct list_head mports; /* list of ports accessing net */
struct rio_mport *hport; /* primary port for accessing net */
unsigned char id; /* RIO network ID */
- struct rio_id_table destid_table; /* destID allocation table */
+ struct device dev;
+ void *enum_data; /* private data for enumerator of the network */
+ void (*release)(struct rio_net *net);
+};
+
+enum rio_link_speed {
+ RIO_LINK_DOWN = 0, /* SRIO Link not initialized */
+ RIO_LINK_125 = 1, /* 1.25 GBaud */
+ RIO_LINK_250 = 2, /* 2.5 GBaud */
+ RIO_LINK_312 = 3, /* 3.125 GBaud */
+ RIO_LINK_500 = 4, /* 5.0 GBaud */
+ RIO_LINK_625 = 5 /* 6.25 GBaud */
+};
+
+enum rio_link_width {
+ RIO_LINK_1X = 0,
+ RIO_LINK_1XR = 1,
+ RIO_LINK_2X = 3,
+ RIO_LINK_4X = 2,
+ RIO_LINK_8X = 4,
+ RIO_LINK_16X = 5
+};
+
+enum rio_mport_flags {
+ RIO_MPORT_DMA = (1 << 0), /* supports DMA data transfers */
+ RIO_MPORT_DMA_SG = (1 << 1), /* DMA supports HW SG mode */
+ RIO_MPORT_IBSG = (1 << 2), /* inbound mapping supports SG */
+};
+
+/**
+ * struct rio_mport_attr - RIO mport device attributes
+ * @flags: mport device capability flags
+ * @link_speed: SRIO link speed value (as defined by RapidIO specification)
+ * @link_width: SRIO link width value (as defined by RapidIO specification)
+ * @dma_max_sge: number of SG list entries that can be handled by DMA channel(s)
+ * @dma_max_size: max number of bytes in single DMA transfer (SG entry)
+ * @dma_align: alignment shift for DMA operations (as for other DMA operations)
+ */
+struct rio_mport_attr {
+ int flags;
+ int link_speed;
+ int link_width;
+
+ /* DMA capability info: valid only if RIO_MPORT_DMA flag is set */
+ int dma_max_sge;
+ int dma_max_size;
+ int dma_align;
};
/* Low-level architecture-dependent routines */
@@ -333,6 +399,9 @@ struct rio_net {
* @get_inb_message: Callback to get a message from an inbound mailbox queue.
* @map_inb: Callback to map RapidIO address region into local memory space.
* @unmap_inb: Callback to unmap RapidIO address region mapped with map_inb().
+ * @query_mport: Callback to query mport device attributes.
+ * @map_outb: Callback to map outbound address region into local memory space.
+ * @unmap_outb: Callback to unmap outbound RapidIO address region.
*/
struct rio_ops {
int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len,
@@ -358,6 +427,11 @@ struct rio_ops {
int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
u64 rstart, u32 size, u32 flags);
void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
+ int (*query_mport)(struct rio_mport *mport,
+ struct rio_mport_attr *attr);
+ int (*map_outb)(struct rio_mport *mport, u16 destid, u64 rstart,
+ u32 size, u32 flags, dma_addr_t *laddr);
+ void (*unmap_outb)(struct rio_mport *mport, u16 destid, u64 rstart);
};
#define RIO_RESOURCE_MEM 0x00000100
@@ -376,6 +450,7 @@ struct rio_ops {
* @id_table: RIO device ids to be associated with this driver
* @probe: RIO device inserted
* @remove: RIO device removed
+ * @shutdown: shutdown notification callback
* @suspend: RIO device suspended
* @resume: RIO device awakened
* @enable_wake: RIO device enable wake event
@@ -390,6 +465,7 @@ struct rio_driver {
const struct rio_device_id *id_table;
int (*probe) (struct rio_dev * dev, const struct rio_device_id * id);
void (*remove) (struct rio_dev * dev);
+ void (*shutdown)(struct rio_dev *dev);
int (*suspend) (struct rio_dev * dev, u32 state);
int (*resume) (struct rio_dev * dev);
int (*enable_wake) (struct rio_dev * dev, u32 state, int enable);
@@ -476,10 +552,14 @@ struct rio_scan_node {
};
/* Architecture and hardware-specific functions */
+extern int rio_mport_initialize(struct rio_mport *);
extern int rio_register_mport(struct rio_mport *);
+extern int rio_unregister_mport(struct rio_mport *);
extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
extern void rio_close_inb_mbox(struct rio_mport *, int);
extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int);
extern void rio_close_outb_mbox(struct rio_mport *, int);
+extern int rio_query_mport(struct rio_mport *port,
+ struct rio_mport_attr *mport_attr);
#endif /* LINUX_RIO_H */
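rio_query_mport() gives callers the controller capabilities before committing to DMA or window mapping. A sketch with hypothetical logging:

	struct rio_mport_attr attr;
	int rc;

	rc = rio_query_mport(mport, &attr);
	if (!rc && (attr.flags & RIO_MPORT_DMA))
		pr_info("mport DMA: max_sge %d, max_size %d\n",
			attr.dma_max_sge, attr.dma_max_size);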
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 9fc2f213e74f..0834264fb7f2 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -369,12 +369,24 @@ void rio_release_region(struct rio_dev *, int);
extern int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local,
u64 rbase, u32 size, u32 rflags);
extern void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart);
+extern int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase,
+ u32 size, u32 rflags, dma_addr_t *local);
+extern void rio_unmap_outb_region(struct rio_mport *mport,
+ u16 destid, u64 rstart);
/* Port-Write management */
extern int rio_request_inb_pwrite(struct rio_dev *,
int (*)(struct rio_dev *, union rio_pw_msg*, int));
extern int rio_release_inb_pwrite(struct rio_dev *);
-extern int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg);
+extern int rio_add_mport_pw_handler(struct rio_mport *mport, void *dev_id,
+ int (*pwcback)(struct rio_mport *mport, void *dev_id,
+ union rio_pw_msg *msg, int step));
+extern int rio_del_mport_pw_handler(struct rio_mport *mport, void *dev_id,
+ int (*pwcback)(struct rio_mport *mport, void *dev_id,
+ union rio_pw_msg *msg, int step));
+extern int rio_inb_pwrite_handler(struct rio_mport *mport,
+ union rio_pw_msg *pw_msg);
+extern void rio_pw_enable(struct rio_mport *mport, int enable);
/* LDM support */
int rio_register_driver(struct rio_driver *);
@@ -435,6 +447,7 @@ static inline void rio_set_drvdata(struct rio_dev *rdev, void *data)
/* Misc driver helpers */
extern u16 rio_local_get_device_id(struct rio_mport *port);
+extern void rio_local_set_device_id(struct rio_mport *port, u16 did);
extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
struct rio_dev *from);
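Port-write handling is now registered against the mport rather than a rio_dev. A hedged sketch of the new flow (foo_pw_handler and foo_dev are assumptions):

static int foo_pw_handler(struct rio_mport *mport, void *dev_id,
			  union rio_pw_msg *msg, int step)
{
	/* inspect the port-write payload here */
	return 0;
}

	/* in setup and teardown paths: */
	rio_add_mport_pw_handler(mport, foo_dev, foo_pw_handler);
	rio_pw_enable(mport, 1);
	...
	rio_pw_enable(mport, 0);
	rio_del_mport_pw_handler(mport, foo_dev, foo_pw_handler);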
diff --git a/include/linux/rio_mport_cdev.h b/include/linux/rio_mport_cdev.h
new file mode 100644
index 000000000000..b65d19df76d2
--- /dev/null
+++ b/include/linux/rio_mport_cdev.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2015-2016, Integrated Device Technology Inc.
+ * Copyright (c) 2015, Prodrive Technologies
+ * Copyright (c) 2015, Texas Instruments Incorporated
+ * Copyright (c) 2015, RapidIO Trade Association
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License(GPL) Version 2, or the BSD-3 Clause license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RIO_MPORT_CDEV_H_
+#define _RIO_MPORT_CDEV_H_
+
+#ifndef __user
+#define __user
+#endif
+
+struct rio_mport_maint_io {
+ uint32_t rioid; /* destID of remote device */
+ uint32_t hopcount; /* hopcount to remote device */
+ uint32_t offset; /* offset in register space */
+ size_t length; /* length in bytes */
+ void __user *buffer; /* data buffer */
+};
+
+/*
+ * Definitions for RapidIO data transfers:
+ * - memory mapped (MAPPED)
+ * - packet generation from memory (TRANSFER)
+ */
+#define RIO_TRANSFER_MODE_MAPPED (1 << 0)
+#define RIO_TRANSFER_MODE_TRANSFER (1 << 1)
+#define RIO_CAP_DBL_SEND (1 << 2)
+#define RIO_CAP_DBL_RECV (1 << 3)
+#define RIO_CAP_PW_SEND (1 << 4)
+#define RIO_CAP_PW_RECV (1 << 5)
+#define RIO_CAP_MAP_OUTB (1 << 6)
+#define RIO_CAP_MAP_INB (1 << 7)
+
+struct rio_mport_properties {
+ uint16_t hdid;
+ uint8_t id; /* Physical port ID */
+ uint8_t index;
+ uint32_t flags;
+ uint32_t sys_size; /* Default addressing size */
+ uint8_t port_ok;
+ uint8_t link_speed;
+ uint8_t link_width;
+ uint32_t dma_max_sge;
+ uint32_t dma_max_size;
+ uint32_t dma_align;
+ uint32_t transfer_mode; /* Default transfer mode */
+ uint32_t cap_sys_size; /* Capable system sizes */
+ uint32_t cap_addr_size; /* Capable addressing sizes */
+ uint32_t cap_transfer_mode; /* Capable transfer modes */
+ uint32_t cap_mport; /* Mport capabilities */
+};
+
+/*
+ * Definitions for RapidIO events;
+ * - incoming port-writes
+ * - incoming doorbells
+ */
+#define RIO_DOORBELL (1 << 0)
+#define RIO_PORTWRITE (1 << 1)
+
+struct rio_doorbell {
+ uint32_t rioid;
+ uint16_t payload;
+};
+
+struct rio_doorbell_filter {
+ uint32_t rioid; /* 0xffffffff to match all ids */
+ uint16_t low;
+ uint16_t high;
+};
+
+
+struct rio_portwrite {
+ uint32_t payload[16];
+};
+
+struct rio_pw_filter {
+ uint32_t mask;
+ uint32_t low;
+ uint32_t high;
+};
+
+/* RapidIO base address for inbound requests set to value defined below
+ * indicates that no specific RIO-to-local address translation is requested
+ * and driver should use direct (one-to-one) address mapping.
+*/
+#define RIO_MAP_ANY_ADDR (uint64_t)(~((uint64_t) 0))
+
+struct rio_mmap {
+ uint32_t rioid;
+ uint64_t rio_addr;
+ uint64_t length;
+ uint64_t handle;
+ void *address;
+};
+
+struct rio_dma_mem {
+ uint64_t length; /* length of DMA memory */
+ uint64_t dma_handle; /* handle associated with this memory */
+ void *buffer; /* pointer to this memory */
+};
+
+
+struct rio_event {
+ unsigned int header; /* event type RIO_DOORBELL or RIO_PORTWRITE */
+ union {
+ struct rio_doorbell doorbell; /* header for RIO_DOORBELL */
+ struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
+ } u;
+};
+
+enum rio_transfer_sync {
+ RIO_TRANSFER_SYNC, /* synchronous transfer */
+ RIO_TRANSFER_ASYNC, /* asynchronous transfer */
+ RIO_TRANSFER_FAF, /* fire-and-forget transfer */
+};
+
+enum rio_transfer_dir {
+ RIO_TRANSFER_DIR_READ, /* Read operation */
+ RIO_TRANSFER_DIR_WRITE, /* Write operation */
+};
+
+/*
+ * RapidIO data exchange transactions are lists of individual transfers. Each
+ * transfer exchanges data between two RapidIO devices by remote direct memory
+ * access and has its own completion code.
+ *
+ * The RapidIO specification defines four types of data exchange requests:
+ * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows
+ * the caller to specify the required type of write operation, or a combination
+ * of them in which only the last data packet requires a response.
+ *
+ * NREAD: read up to 256 bytes from remote device memory into local memory
+ * NWRITE: write up to 256 bytes from local memory to remote device memory
+ * without confirmation
+ * SWRITE: as NWRITE, but all addresses and payloads must be 64-bit aligned
+ * NWRITE_R: as NWRITE, but expect acknowledgment from remote device.
+ *
+ * The default exchange is chosen from NREAD and any of the WRITE modes as the
+ * driver sees fit. For write requests the user can explicitly choose between
+ * any of the write modes for each transaction.
+ */
+enum rio_exchange {
+ RIO_EXCHANGE_DEFAULT, /* Default method */
+ RIO_EXCHANGE_NWRITE, /* All packets using NWRITE */
+ RIO_EXCHANGE_SWRITE, /* All packets using SWRITE */
+ RIO_EXCHANGE_NWRITE_R, /* Last packet NWRITE_R, others NWRITE */
+ RIO_EXCHANGE_SWRITE_R, /* Last packet NWRITE_R, others SWRITE */
+ RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */
+};
+
+struct rio_transfer_io {
+ uint32_t rioid; /* Target destID */
+ uint64_t rio_addr; /* Address in target's RIO mem space */
+ enum rio_exchange method; /* Data exchange method */
+ void __user *loc_addr;
+ uint64_t handle;
+ uint64_t offset; /* Offset in buffer */
+ uint64_t length; /* Length in bytes */
+ uint32_t completion_code; /* Completion code for this transfer */
+};
+
+struct rio_transaction {
+ uint32_t transfer_mode; /* Data transfer mode */
+ enum rio_transfer_sync sync; /* Synchronization method */
+ enum rio_transfer_dir dir; /* Transfer direction */
+ size_t count; /* Number of transfers */
+ struct rio_transfer_io __user *block; /* Array of <count> transfers */
+};
+
+struct rio_async_tx_wait {
+ uint32_t token; /* DMA transaction ID token */
+ uint32_t timeout; /* Wait timeout in msec, if 0 use default TO */
+};
+
+#define RIO_MAX_DEVNAME_SZ 20
+
+struct rio_rdev_info {
+ uint32_t destid;
+ uint8_t hopcount;
+ uint32_t comptag;
+ char name[RIO_MAX_DEVNAME_SZ + 1];
+};
+
+/* Driver IOCTL codes */
+#define RIO_MPORT_DRV_MAGIC 'm'
+
+#define RIO_MPORT_MAINT_HDID_SET \
+ _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t)
+#define RIO_MPORT_MAINT_COMPTAG_SET \
+ _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t)
+#define RIO_MPORT_MAINT_PORT_IDX_GET \
+ _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t)
+#define RIO_MPORT_GET_PROPERTIES \
+ _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
+#define RIO_MPORT_MAINT_READ_LOCAL \
+ _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_LOCAL \
+ _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_READ_REMOTE \
+ _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_REMOTE \
+ _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io)
+#define RIO_ENABLE_DOORBELL_RANGE \
+ _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter)
+#define RIO_DISABLE_DOORBELL_RANGE \
+ _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter)
+#define RIO_ENABLE_PORTWRITE_RANGE \
+ _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter)
+#define RIO_DISABLE_PORTWRITE_RANGE \
+ _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
+#define RIO_SET_EVENT_MASK \
+ _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int)
+#define RIO_GET_EVENT_MASK \
+ _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int)
+#define RIO_MAP_OUTBOUND \
+ _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
+#define RIO_UNMAP_OUTBOUND \
+ _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap)
+#define RIO_MAP_INBOUND \
+ _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
+#define RIO_UNMAP_INBOUND \
+ _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t)
+#define RIO_ALLOC_DMA \
+ _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
+#define RIO_FREE_DMA \
+ _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t)
+#define RIO_TRANSFER \
+ _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
+#define RIO_WAIT_FOR_ASYNC \
+ _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait)
+#define RIO_DEV_ADD \
+ _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info)
+#define RIO_DEV_DEL \
+ _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info)
+
+#endif /* _RIO_MPORT_CDEV_H_ */
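These ioctls are issued from user space against the mport character device; a hedged sketch (the /dev/rio_mport0 node name is an assumption about the driver that exposes this interface):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rio_mport_cdev.h>

int main(void)
{
	struct rio_mport_properties props;
	int fd = open("/dev/rio_mport0", O_RDWR);

	if (fd < 0)
		return 1;
	if (!ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props))
		printf("hdid %u, link width %u, flags 0x%x\n",
		       props.hdid, props.link_width, props.flags);
	close(fd);
	return 0;
}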
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index 218168a2b5e9..1063ae382bc2 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -238,6 +238,8 @@
#define RIO_PORT_N_ACK_INBOUND 0x3f000000
#define RIO_PORT_N_ACK_OUTSTAND 0x00003f00
#define RIO_PORT_N_ACK_OUTBOUND 0x0000003f
+#define RIO_PORT_N_CTL2_CSR(x) (0x0054 + x*0x20)
+#define RIO_PORT_N_CTL2_SEL_BAUD 0xf0000000
#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20)
#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */
#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */
@@ -249,6 +251,7 @@
#define RIO_PORT_N_CTL_PWIDTH 0xc0000000
#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000
#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000
+#define RIO_PORT_N_CTL_IPW 0x38000000 /* Initialized Port Width */
#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001
#define RIO_PORT_N_CTL_LOCKOUT 0x00000002
#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index a07f42bedda3..49eb4f8ebac9 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -86,6 +86,7 @@ enum ttu_flags {
TTU_MIGRATION = 2, /* migration mode */
TTU_MUNLOCK = 4, /* munlock mode */
TTU_LZFREE = 8, /* lazy free mode */
+ TTU_SPLIT_HUGE_PMD = 16, /* split huge PMD if any */
TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
@@ -93,6 +94,8 @@ enum ttu_flags {
TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
* and caller guarantees they will
* do a final flush if necessary */
+ TTU_RMAP_LOCKED = (1 << 12) /* do not grab rmap lock:
+ * caller holds it */
};
#ifdef CONFIG_MMU
@@ -240,6 +243,8 @@ int page_mkclean(struct page *);
*/
int try_to_munlock(struct page *);
+void remove_migration_ptes(struct page *old, struct page *new, bool locked);
+
/*
* Called by memory-failure.c to kill processes.
*/
@@ -266,6 +271,7 @@ struct rmap_walk_control {
};
int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
+int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
new file mode 100644
index 000000000000..e0aca1476001
--- /dev/null
+++ b/include/linux/rmi.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_H
+#define _RMI_H
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define NAME_BUFFER_SIZE 256
+
+/**
+ * struct rmi_2d_axis_alignment - target axis alignment
+ * @swap_axes: set to TRUE if desired to swap x- and y-axis
+ * @flip_x: set to TRUE if desired to flip direction on x-axis
+ * @flip_y: set to TRUE if desired to flip direction on y-axis
+ * @clip_x_low - reported X coordinates below this setting will be clipped to
+ * the specified value
+ * @clip_x_high - reported X coordinates above this setting will be clipped to
+ * the specified value
+ * @clip_y_low - reported Y coordinates below this setting will be clipped to
+ * the specified value
+ * @clip_y_high - reported Y coordinates above this setting will be clipped to
+ * the specified value
+ * @offset_x - this value will be added to all reported X coordinates
+ * @offset_y - this value will be added to all reported Y coordinates
+ * @rel_report_enabled - if set to true, the relative reporting will be
+ * automatically enabled for this sensor.
+ */
+struct rmi_2d_axis_alignment {
+ bool swap_axes;
+ bool flip_x;
+ bool flip_y;
+ u16 clip_x_low;
+ u16 clip_y_low;
+ u16 clip_x_high;
+ u16 clip_y_high;
+ u16 offset_x;
+ u16 offset_y;
+ u8 delta_x_threshold;
+ u8 delta_y_threshold;
+};
+
+/** This is used to override any hints an F11 2D sensor might have provided
+ * as to what type of sensor it is.
+ *
+ * @rmi_sensor_default - do not override, determine from F11_2D_QUERY14 if
+ * available.
+ * @rmi_sensor_touchscreen - treat the sensor as a touchscreen (direct
+ * pointing).
+ * @rmi_sensor_touchpad - treat the sensor as a touchpad (indirect
+ * pointing).
+ */
+enum rmi_sensor_type {
+ rmi_sensor_default = 0,
+ rmi_sensor_touchscreen,
+ rmi_sensor_touchpad
+};
+
+#define RMI_F11_DISABLE_ABS_REPORT BIT(0)
+
+/**
+ * struct rmi_2d_sensor_data - overrides defaults for a 2D sensor.
+ * @axis_align - provides axis alignment overrides (see above).
+ * @sensor_type - Forces the driver to treat the sensor as an indirect
+ * pointing device (touchpad) rather than a direct pointing device
+ * (touchscreen). This is useful when the F11_2D_QUERY14 register is not
+ * available.
+ * @disable_report_mask - Force data to not be reported even if it is supported
+ * by the firmware.
+ * @topbuttonpad - Used with the "5 buttons touchpads" found on the Lenovo 40
+ * series
+ * @kernel_tracking - most modern RMI F11 firmwares implement the Multifinger
+ * Type B protocol. However, there are some corner cases where the user
+ * triggers some jumps by tapping with two fingers on the touchpad.
+ * Use this setting and dmax to filter out these jumps.
+ * Also, when using an old sensor using MF Type A behavior, set to true to
+ * report an actual MT protocol B.
+ * @dmax - the maximum distance (in sensor units) the kernel tracking allows two
+ * distinct fingers to be considered the same.
+ */
+struct rmi_2d_sensor_platform_data {
+ struct rmi_2d_axis_alignment axis_align;
+ enum rmi_sensor_type sensor_type;
+ int x_mm;
+ int y_mm;
+ int disable_report_mask;
+ u16 rezero_wait;
+ bool topbuttonpad;
+ bool kernel_tracking;
+ int dmax;
+};
+
+/**
+ * struct rmi_f30_data - overrides defaults for a single F30 GPIOs/LED chip.
+ * @buttonpad - the touchpad is a buttonpad, so enable only the first actual
+ * button that is found.
+ * @trackstick_buttons - Set when the function 30 is handling the physical
+ * buttons of the trackstick (as a PS/2 passthrough device).
+ * @disable - the touchpad incorrectly reports F30 and it should be ignored.
+ * This is a special case which is due to misconfigured firmware.
+ */
+struct rmi_f30_data {
+ bool buttonpad;
+ bool trackstick_buttons;
+ bool disable;
+};
+
+/**
+ * enum rmi_f01_nosleep - overrides the default NoSleep behavior.
+ *
+ */
+enum rmi_f01_nosleep {
+ RMI_F01_NOSLEEP_DEFAULT = 0,
+ RMI_F01_NOSLEEP_OFF = 1,
+ RMI_F01_NOSLEEP_ON = 2
+};
+
+/**
+ * struct rmi_f01_power_management - When non-zero, these values will be written
+ * to the touch sensor to override the default firmware settings. For a
+ * detailed explanation of what each field does, see the corresponding
+ * documentation in the RMI4 specification.
+ *
+ * @nosleep - specifies whether the device is permitted to sleep or doze (that
+ * is, enter a temporary low power state) when no fingers are touching the
+ * sensor.
+ * @wakeup_threshold - controls the capacitance threshold at which the touch
+ * sensor will decide to wake up from that low power state.
+ * @doze_holdoff - controls how long the touch sensor waits after the last
+ * finger lifts before entering the doze state, in units of 100ms.
+ * @doze_interval - controls the interval between checks for finger presence
+ * when the touch sensor is in doze mode, in units of 10ms.
+ */
+struct rmi_f01_power_management {
+ enum rmi_f01_nosleep nosleep;
+ u8 wakeup_threshold;
+ u8 doze_holdoff;
+ u8 doze_interval;
+};
+
+/**
+ * struct rmi_device_platform_data_spi - provides parameters used in SPI
+ * communications. All Synaptics SPI products support a standard SPI
+ * interface; some also support what is called SPI V2 mode, depending on
+ * firmware and/or ASIC limitations. In V2 mode, the touch sensor can
+ * support shorter delays during certain operations, and these are specified
+ * separately from the standard mode delays.
+ *
+ * @block_delay_us - for standard SPI transactions consisting of both a read and
+ * write operation, the delay (in microseconds) between the read and write
+ * operations.
+ * @split_read_block_delay_us - for V2 SPI transactions consisting of both a
+ * read and write operation, the delay (in microseconds) between the read and
+ * write operations.
+ * @read_delay_us - the delay between each byte of a read operation in normal
+ * SPI mode.
+ * @write_delay_us - the delay between each byte of a write operation in normal
+ * SPI mode.
+ * @split_read_byte_delay_us - the delay between each byte of a read operation
+ * in V2 mode.
+ * @pre_delay_us - the delay before the start of a SPI transaction. This is
+ * typically useful in conjunction with custom chip select assertions (see
+ * below).
+ * @post_delay_us - the delay after the completion of an SPI transaction. This
+ * is typically useful in conjunction with custom chip select assertions (see
+ * below).
+ * @cs_assert - For systems where the SPI subsystem does not control the CS/SSB
+ * line, or where such control is broken, you can provide a custom routine to
+ * handle a GPIO as CS/SSB. This routine will be called at the beginning and
+ * end of each SPI transaction. The RMI SPI implementation will wait
+ * pre_delay_us after this routine returns before starting the SPI transfer;
+ * and post_delay_us after completion of the SPI transfer(s) before calling it
+ * with assert==FALSE.
+ */
+struct rmi_device_platform_data_spi {
+ u32 block_delay_us;
+ u32 split_read_block_delay_us;
+ u32 read_delay_us;
+ u32 write_delay_us;
+ u32 split_read_byte_delay_us;
+ u32 pre_delay_us;
+ u32 post_delay_us;
+ u8 bits_per_word;
+ u16 mode;
+
+ void *cs_assert_data;
+ int (*cs_assert)(const void *cs_assert_data, const bool assert);
+};
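
For systems that need the cs_assert hook described above, a minimal GPIO-backed handler might look like the sketch below; the context structure, GPIO number and active-low polarity are assumptions for illustration, not part of this header.

#include <linux/gpio.h>

struct example_cs_ctx {
	int gpio;			/* GPIO wired to the CS/SSB line (assumed) */
};

/* Called by the RMI SPI core with assert == true before a transfer and
 * assert == false after it; pre_delay_us/post_delay_us are applied by the
 * core around these calls. */
static int example_cs_assert(const void *cs_assert_data, const bool assert)
{
	const struct example_cs_ctx *ctx = cs_assert_data;

	gpio_set_value(ctx->gpio, assert ? 0 : 1);	/* CS/SSB assumed active low */
	return 0;
}

The function and its data pointer would then be plugged into the cs_assert and cs_assert_data members of struct rmi_device_platform_data_spi below.
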
+
+/**
+ * struct rmi_device_platform_data - system specific configuration info.
+ *
+ * @reset_delay_ms - after issuing a reset command to the touch sensor, the
+ * driver waits a few milliseconds to give the firmware a chance to
+ * re-initialize. You can override the default wait period here.
+ */
+struct rmi_device_platform_data {
+ int reset_delay_ms;
+
+ struct rmi_device_platform_data_spi spi_data;
+
+ /* function handler pdata */
+ struct rmi_2d_sensor_platform_data *sensor_pdata;
+ struct rmi_f01_power_management power_management;
+ struct rmi_f30_data *f30_data;
+};
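
As a sketch of how a board file might populate this structure (every value below is an illustrative assumption, not a recommendation):

#include <linux/rmi.h>

static struct rmi_2d_sensor_platform_data example_sensor_pdata = {
	.axis_align	= { .swap_axes = true, .flip_x = true },
	.sensor_type	= rmi_sensor_touchpad,
	.x_mm		= 102,			/* physical size, illustrative */
	.y_mm		= 68,
};

static const struct rmi_device_platform_data example_pdata = {
	.reset_delay_ms	= 100,			/* override the default reset wait */
	.sensor_pdata	= &example_sensor_pdata,
	.power_management = { .nosleep = RMI_F01_NOSLEEP_OFF },
};

A transport driver would typically embed such a structure in the pdata member of its struct rmi_transport_dev.
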
+
+/**
+ * struct rmi_function_descriptor - RMI function base addresses
+ *
+ * @query_base_addr: The RMI Query base address
+ * @command_base_addr: The RMI Command base address
+ * @control_base_addr: The RMI Control base address
+ * @data_base_addr: The RMI Data base address
+ * @interrupt_source_count: The number of irqs this RMI function needs
+ * @function_number: The RMI function number
+ *
+ * This struct is used when iterating the Page Description Table. The addresses
+ * are 16-bit values to include the current page address.
+ *
+ */
+struct rmi_function_descriptor {
+ u16 query_base_addr;
+ u16 command_base_addr;
+ u16 control_base_addr;
+ u16 data_base_addr;
+ u8 interrupt_source_count;
+ u8 function_number;
+ u8 function_version;
+};
+
+struct rmi_device;
+
+/**
+ * struct rmi_transport_dev - represent an RMI transport device
+ *
+ * @dev: Pointer to the communication device, e.g. i2c or spi
+ * @rmi_dev: Pointer to the RMI device
+ * @proto_name: name of the transport protocol (SPI, i2c, etc)
+ * @ops: pointer to transport operations implementation
+ *
+ * The RMI transport device implements the glue between different communication
+ * buses such as I2C and SPI.
+ *
+ */
+struct rmi_transport_dev {
+ struct device *dev;
+ struct rmi_device *rmi_dev;
+
+ const char *proto_name;
+ const struct rmi_transport_ops *ops;
+
+ struct rmi_device_platform_data pdata;
+
+ struct input_dev *input;
+
+ void *attn_data;
+ int attn_size;
+};
+
+/**
+ * struct rmi_transport_ops - defines transport protocol operations.
+ *
+ * @write_block: Writing a block of data to the specified address
+ * @read_block: Read a block of data from the specified address.
+ */
+struct rmi_transport_ops {
+ int (*write_block)(struct rmi_transport_dev *xport, u16 addr,
+ const void *buf, size_t len);
+ int (*read_block)(struct rmi_transport_dev *xport, u16 addr,
+ void *buf, size_t len);
+ int (*reset)(struct rmi_transport_dev *xport, u16 reset_addr);
+};
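
A transport backend only has to wire these callbacks up to its bus primitives. The fragment below is a heavily simplified I2C-flavoured sketch: RMI page selection and error-code normalisation are ignored, and the example_* names are invented for illustration.

#include <linux/i2c.h>
#include <linux/rmi.h>

static int example_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct i2c_client *client = to_i2c_client(xport->dev);

	/* Sketch only: assumes addr lies in the currently selected RMI page. */
	return i2c_smbus_read_i2c_block_data(client, addr & 0xff, len, buf);
}

static int example_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct i2c_client *client = to_i2c_client(xport->dev);

	return i2c_smbus_write_i2c_block_data(client, addr & 0xff, len, buf);
}

static const struct rmi_transport_ops example_ops = {
	.read_block	= example_read_block,
	.write_block	= example_write_block,
};

The probe routine of such a backend would fill a struct rmi_transport_dev with these ops and hand it to rmi_register_transport_device().
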
+
+/**
+ * struct rmi_driver - driver for an RMI4 sensor on the RMI bus.
+ *
+ * @driver: Device driver model driver
+ * @reset_handler: Called when a reset is detected.
+ * @clear_irq_bits: Clear the specified bits in the current interrupt mask.
+ * @set_irq_bits: Set the specified bits in the current interrupt mask.
+ * @store_productid: Callback to cache the product ID from function 01
+ * @data: Private data pointer
+ *
+ */
+struct rmi_driver {
+ struct device_driver driver;
+
+ int (*reset_handler)(struct rmi_device *rmi_dev);
+ int (*clear_irq_bits)(struct rmi_device *rmi_dev, unsigned long *mask);
+ int (*set_irq_bits)(struct rmi_device *rmi_dev, unsigned long *mask);
+ int (*store_productid)(struct rmi_device *rmi_dev);
+ int (*set_input_params)(struct rmi_device *rmi_dev,
+ struct input_dev *input);
+ void *data;
+};
+
+/**
+ * struct rmi_device - represents an RMI4 sensor device on the RMI bus.
+ *
+ * @dev: The device created for the RMI bus
+ * @number: Unique number for the device on the bus.
+ * @driver: Pointer to associated driver
+ * @xport: Pointer to the transport interface
+ *
+ */
+struct rmi_device {
+ struct device dev;
+ int number;
+
+ struct rmi_driver *driver;
+ struct rmi_transport_dev *xport;
+
+};
+
+struct rmi_driver_data {
+ struct list_head function_list;
+
+ struct rmi_device *rmi_dev;
+
+ struct rmi_function *f01_container;
+ bool f01_bootloader_mode;
+
+ u32 attn_count;
+ int num_of_irq_regs;
+ int irq_count;
+ unsigned long *irq_status;
+ unsigned long *fn_irq_bits;
+ unsigned long *current_irq_mask;
+ unsigned long *new_irq_mask;
+ struct mutex irq_mutex;
+ struct input_dev *input;
+
+ u8 pdt_props;
+ u8 bsr;
+
+ bool enabled;
+
+ void *data;
+};
+
+int rmi_register_transport_device(struct rmi_transport_dev *xport);
+void rmi_unregister_transport_device(struct rmi_transport_dev *xport);
+int rmi_process_interrupt_requests(struct rmi_device *rmi_dev);
+
+int rmi_driver_suspend(struct rmi_device *rmi_dev);
+int rmi_driver_resume(struct rmi_device *rmi_dev);
+#endif
diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h
deleted file mode 100644
index fe3dc64e5aeb..000000000000
--- a/include/linux/rotary_encoder.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __ROTARY_ENCODER_H__
-#define __ROTARY_ENCODER_H__
-
-struct rotary_encoder_platform_data {
- unsigned int steps;
- unsigned int axis;
- unsigned int gpio_a;
- unsigned int gpio_b;
- unsigned int inverted_a;
- unsigned int inverted_b;
- unsigned int steps_per_period;
- bool relative_axis;
- bool rollover;
- bool wakeup_source;
-};
-
-#endif /* __ROTARY_ENCODER_H__ */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 3359f0422c6b..b693adac853b 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -89,6 +89,8 @@ struct rtc_class_ops {
int (*set_mmss)(struct device *, unsigned long secs);
int (*read_callback)(struct device *, int data);
int (*alarm_irq_enable)(struct device *, unsigned int enabled);
+ int (*read_offset)(struct device *, long *offset);
+ int (*set_offset)(struct device *, long offset);
};
#define RTC_DEVICE_NAME_SIZE 20
@@ -208,6 +210,8 @@ void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data);
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
ktime_t expires, ktime_t period);
void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer);
+int rtc_read_offset(struct rtc_device *rtc, long *offset);
+int rtc_set_offset(struct rtc_device *rtc, long offset);
void rtc_timer_do_work(struct work_struct *work);
static inline bool is_leap_year(unsigned int year)
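
A driver opting into the new offset hooks just adds two callbacks to its rtc_class_ops. The sketch below is hypothetical: the register access is elided and the accepted range is invented for illustration.

#include <linux/errno.h>
#include <linux/rtc.h>

static int example_read_offset(struct device *dev, long *offset)
{
	*offset = 0;		/* would read and scale the chip's aging register */
	return 0;
}

static int example_set_offset(struct device *dev, long offset)
{
	if (offset < -100000 || offset > 100000)	/* hypothetical range */
		return -ERANGE;
	return 0;		/* would program the aging register here */
}

static const struct rtc_class_ops example_rtc_ops = {
	/* ...read_time/set_time and friends as before... */
	.read_offset	= example_read_offset,
	.set_offset	= example_set_offset,
};
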
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a10494a94cc3..60bba7e032dc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -51,6 +51,7 @@ struct sched_param {
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
+#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
@@ -182,8 +183,6 @@ extern void update_cpu_load_nohz(int active);
static inline void update_cpu_load_nohz(int active) { }
#endif
-extern unsigned long get_parent_ip(unsigned long addr);
-
extern void dump_cpu_task(int cpu);
struct seq_file;
@@ -427,6 +426,7 @@ extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
+extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
@@ -719,6 +719,10 @@ struct signal_struct {
/* Earliest-expiration cache. */
struct task_cputime cputime_expires;
+#ifdef CONFIG_NO_HZ_FULL
+ unsigned long tick_dep_mask;
+#endif
+
struct list_head cpu_timers[3];
struct pid *tty_old_pgrp;
@@ -775,7 +779,6 @@ struct signal_struct {
#endif
#ifdef CONFIG_AUDIT
unsigned audit_tty;
- unsigned audit_tty_log_passwd;
struct tty_audit_buf *tty_audit_buf;
#endif
@@ -920,6 +923,10 @@ static inline int sched_info_on(void)
#endif
}
+#ifdef CONFIG_SCHEDSTATS
+void force_schedstat_enabled(void);
+#endif
+
enum cpu_idle_type {
CPU_IDLE,
CPU_NOT_IDLE,
@@ -1289,6 +1296,8 @@ struct sched_rt_entity {
unsigned long timeout;
unsigned long watchdog_stamp;
unsigned int time_slice;
+ unsigned short on_rq;
+ unsigned short on_list;
struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
@@ -1329,10 +1338,6 @@ struct sched_dl_entity {
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
- * @dl_new tells if a new instance arrived. If so we must
- * start executing it with full runtime and reset its absolute
- * deadline;
- *
* @dl_boosted tells if we are boosted due to DI. If so we are
* outside bandwidth enforcement mechanism (but only until we
* exit the critical section);
@@ -1340,7 +1345,7 @@ struct sched_dl_entity {
* @dl_yielded tells if task gave up the cpu before consuming
* all its available runtime during the last job.
*/
- int dl_throttled, dl_new, dl_boosted, dl_yielded;
+ int dl_throttled, dl_boosted, dl_yielded;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -1542,6 +1547,10 @@ struct task_struct {
VTIME_SYS,
} vtime_snap_whence;
#endif
+
+#ifdef CONFIG_NO_HZ_FULL
+ unsigned long tick_dep_mask;
+#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
u64 start_time; /* monotonic time in nsec */
u64 real_start_time; /* boot based time in nsec */
@@ -1784,8 +1793,8 @@ struct task_struct {
* time slack values; these are used to round up poll() and
* select() etc timeout values. These are in nanoseconds.
*/
- unsigned long timer_slack_ns;
- unsigned long default_timer_slack_ns;
+ u64 timer_slack_ns;
+ u64 default_timer_slack_ns;
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
@@ -1811,6 +1820,16 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
+#ifdef CONFIG_KCOV
+ /* Coverage collection mode enabled for this task (0 if disabled). */
+ enum kcov_mode kcov_mode;
+ /* Size of the kcov_area. */
+ unsigned kcov_size;
+ /* Buffer for coverage collection. */
+ void *kcov_area;
+	/* kcov descriptor wired with this task or NULL. */
+ struct kcov *kcov;
+#endif
#ifdef CONFIG_MEMCG
struct mem_cgroup *memcg_in_oom;
gfp_t memcg_oom_gfp_mask;
@@ -1830,6 +1849,9 @@ struct task_struct {
unsigned long task_state_change;
#endif
int pagefault_disabled;
+#ifdef CONFIG_MMU
+ struct task_struct *oom_reaper_list;
+#endif
/* CPU-specific state of this task */
struct thread_struct thread;
/*
@@ -2356,10 +2378,7 @@ static inline void wake_up_nohz_cpu(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ_FULL
-extern bool sched_can_stop_tick(void);
extern u64 scheduler_tick_max_deferment(void);
-#else
-static inline bool sched_can_stop_tick(void) { return false; }
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
@@ -2855,10 +2874,18 @@ static inline unsigned long stack_not_used(struct task_struct *p)
unsigned long *n = end_of_stack(p);
do { /* Skip over canary */
+# ifdef CONFIG_STACK_GROWSUP
+ n--;
+# else
n++;
+# endif
} while (!*n);
+# ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long)end_of_stack(p) - (unsigned long)n;
+# else
return (unsigned long)n - (unsigned long)end_of_stack(p);
+# endif
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
@@ -3207,4 +3234,13 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
+#ifdef CONFIG_CPU_FREQ
+struct update_util_data {
+ void (*func)(struct update_util_data *data,
+ u64 time, unsigned long util, unsigned long max);
+};
+
+void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
+#endif /* CONFIG_CPU_FREQ */
+
#endif
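
The new hook is how the scheduler pushes per-CPU utilization samples to cpufreq. A governor-side sketch, with invented names, could look like this:

#include <linux/sched.h>

static void example_util_update(struct update_util_data *data,
				u64 time, unsigned long util, unsigned long max)
{
	/* A real governor would compare util against max here and, if the
	 * ratio changed enough, schedule a frequency update. */
}

static struct update_util_data example_update = {
	.func = example_util_update,
};

/* Install/remove the hook for one CPU (typically from governor init/exit):
 *	cpufreq_set_update_util_data(cpu, &example_update);
 *	cpufreq_set_update_util_data(cpu, NULL);
 */
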
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index c9e4731cf10b..22db1e63707e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -14,27 +14,6 @@ extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
-/*
- * Default maximum number of active map areas, this limits the number of vmas
- * per mm struct. Users can overwrite this number by sysctl but there is a
- * problem.
- *
- * When a program's coredump is generated as ELF format, a section is created
- * per a vma. In ELF, the number of sections is represented in unsigned short.
- * This means the number of sections should be smaller than 65535 at coredump.
- * Because the kernel adds some informative sections to a image of program at
- * generating coredump, we need some margin. The number of extra sections is
- * 1-3 now and depends on arch. We use "5" as safe margin, here.
- *
- * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
- * not a hard limit any more. Although some userspace tools can be surprised by
- * that.
- */
-#define MAPCOUNT_ELF_CORE_MARGIN (5)
-#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-
-extern int sysctl_max_map_count;
-
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
@@ -95,4 +74,8 @@ extern int sysctl_numa_balancing(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+extern int sysctl_schedstats(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
#endif /* _SCHED_SYSCTL_H */
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index 72ce932c69b2..35de50a65665 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -33,6 +33,7 @@ enum scpi_sensor_class {
VOLTAGE,
CURRENT,
POWER,
+ ENERGY,
};
struct scpi_sensor_info {
@@ -68,7 +69,7 @@ struct scpi_ops {
struct scpi_dvfs_info *(*dvfs_get_info)(u8);
int (*sensor_get_capability)(u16 *sensors);
int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *);
- int (*sensor_get_value)(u16, u32 *);
+ int (*sensor_get_value)(u16, u64 *);
};
#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
diff --git a/include/linux/security.h b/include/linux/security.h
index 4824a4ccaf1c..157f0cb1e4d2 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -24,10 +24,12 @@
#include <linux/key.h>
#include <linux/capability.h>
+#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mm.h>
struct linux_binprm;
struct cred;
@@ -298,9 +300,11 @@ int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
void security_transfer_creds(struct cred *new, const struct cred *old);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
-int security_kernel_fw_from_file(struct file *file, char *buf, size_t size);
int security_kernel_module_request(char *kmod_name);
int security_kernel_module_from_file(struct file *file);
+int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
+int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
+ enum kernel_read_file_id id);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
@@ -850,18 +854,20 @@ static inline int security_kernel_create_files_as(struct cred *cred,
return 0;
}
-static inline int security_kernel_fw_from_file(struct file *file,
- char *buf, size_t size)
+static inline int security_kernel_module_request(char *kmod_name)
{
return 0;
}
-static inline int security_kernel_module_request(char *kmod_name)
+static inline int security_kernel_read_file(struct file *file,
+ enum kernel_read_file_id id)
{
return 0;
}
-static inline int security_kernel_module_from_file(struct file *file)
+static inline int security_kernel_post_read_file(struct file *file,
+ char *buf, loff_t size,
+ enum kernel_read_file_id id)
{
return 0;
}
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index faa0e0370ce7..434879759725 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -76,6 +76,12 @@ struct uart_8250_ops {
void (*release_irq)(struct uart_8250_port *);
};
+struct uart_8250_em485 {
+ struct timer_list start_tx_timer; /* "rs485 start tx" timer */
+ struct timer_list stop_tx_timer; /* "rs485 stop tx" timer */
+ struct timer_list *active_timer; /* pointer to active timer */
+};
+
/*
* This should be used by drivers which want to register
* their own 8250 ports without registering their own
@@ -122,6 +128,8 @@ struct uart_8250_port {
/* 8250 specific callbacks */
int (*dl_read)(struct uart_8250_port *);
void (*dl_write)(struct uart_8250_port *, int);
+
+ struct uart_8250_em485 *em485;
};
static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index e03d6ba5e5b4..cbfcf38e220d 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -342,21 +342,26 @@ struct earlycon_device {
struct earlycon_id {
char name[16];
+ char compatible[128];
int (*setup)(struct earlycon_device *, const char *options);
} __aligned(32);
-extern int setup_earlycon(char *buf);
-extern int of_setup_earlycon(unsigned long addr,
- int (*setup)(struct earlycon_device *, const char *));
+extern const struct earlycon_id __earlycon_table[];
+extern const struct earlycon_id __earlycon_table_end[];
+
+#define OF_EARLYCON_DECLARE(_name, compat, fn) \
+ static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
+ __used __section(__earlycon_table) \
+ = { .name = __stringify(_name), \
+ .compatible = compat, \
+ .setup = fn }
-#define EARLYCON_DECLARE(_name, func) \
- static const struct earlycon_id __earlycon_##_name \
- __used __section(__earlycon_table) \
- = { .name = __stringify(_name), \
- .setup = func }
+#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
-#define OF_EARLYCON_DECLARE(name, compat, fn) \
- _OF_DECLARE(earlycon, name, compat, fn, void *)
+extern int setup_earlycon(char *buf);
+extern int of_setup_earlycon(const struct earlycon_id *match,
+ unsigned long node,
+ const char *options);
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
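
With the table-driven form, an early console is declared once and matched by compatible string. A sketch for a hypothetical "vendor,foo-uart" binding (the write routine is stubbed out):

#include <linux/console.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/serial_core.h>

static void foo_early_write(struct console *con, const char *s, unsigned int n)
{
	/* would poll bytes out of the UART's transmit FIFO here */
}

static int __init foo_early_console_setup(struct earlycon_device *device,
					  const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = foo_early_write;
	return 0;
}
OF_EARLYCON_DECLARE(foo_uart, "vendor,foo-uart", foo_early_console_setup);
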
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4ce9ff7086f4..15d0df943466 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1161,10 +1161,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
to->l4_hash = from->l4_hash;
};
-static inline void skb_sender_cpu_clear(struct sk_buff *skb)
-{
-}
-
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
@@ -1985,6 +1981,30 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
skb->tail += len;
}
+/**
+ * skb_tailroom_reserve - adjust reserved_tailroom
+ * @skb: buffer to alter
+ * @mtu: maximum amount of headlen permitted
+ * @needed_tailroom: minimum amount of reserved_tailroom
+ *
+ * Set reserved_tailroom so that headlen can be as large as possible but
+ * not larger than mtu and tailroom cannot be smaller than
+ * needed_tailroom.
+ * The required headroom should already have been reserved before using
+ * this function.
+ */
+static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
+ unsigned int needed_tailroom)
+{
+ SKB_LINEAR_ASSERT(skb);
+ if (mtu < skb_tailroom(skb) - needed_tailroom)
+ /* use at most mtu */
+ skb->reserved_tailroom = skb_tailroom(skb) - mtu;
+ else
+ /* use up to all available space */
+ skb->reserved_tailroom = needed_tailroom;
+}
+
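
The clamp is easier to see with numbers. The stand-alone snippet below reproduces just the arithmetic (no sk_buff involved) for a buffer with 1500 bytes of tailroom, a 1000-byte mtu and 100 bytes of required tailroom:

#include <stdio.h>

static unsigned int reserve(unsigned int tailroom, unsigned int mtu,
			    unsigned int needed_tailroom)
{
	if (mtu < tailroom - needed_tailroom)
		return tailroom - mtu;	/* headlen capped at mtu */
	return needed_tailroom;		/* headlen gets all remaining space */
}

int main(void)
{
	/* reserved_tailroom becomes 500, so headlen may grow to 1000 == mtu */
	printf("%u\n", reserve(1500, 1000, 100));
	return 0;
}
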
#define ENCAP_TYPE_ETHER 0
#define ENCAP_TYPE_IPPROTO 1
@@ -2162,6 +2182,11 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
return skb->csum_start - skb_headroom(skb);
}
+static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
+{
+ return skb->head + skb->csum_start;
+}
+
static inline int skb_transport_offset(const struct sk_buff *skb)
{
return skb_transport_header(skb) - skb->data;
@@ -2400,6 +2425,10 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
{
return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
+void napi_consume_skb(struct sk_buff *skb, int budget);
+
+void __kfree_skb_flush(void);
+void __kfree_skb_defer(struct sk_buff *skb);
/**
* __dev_alloc_pages - allocate page for network Rx
@@ -2622,6 +2651,13 @@ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len
skb_headroom(skb) + len <= skb->hdr_len;
}
+static inline int skb_try_make_writable(struct sk_buff *skb,
+ unsigned int write_len)
+{
+ return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
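
Callers typically use this as a guard before rewriting packet headers; a hedged sketch of that pattern:

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

/* Make sure the first sizeof(struct iphdr) bytes are private to us before
 * editing them; a non-zero return means the unclone/expand failed. */
static int example_mangle_ip_header(struct sk_buff *skb)
{
	if (skb_try_make_writable(skb, sizeof(struct iphdr)))
		return -ENOMEM;

	/* safe to modify ip_hdr(skb) fields here */
	return 0;
}
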
static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
int cloned)
{
@@ -3550,6 +3586,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
struct skb_gso_cb {
int mac_offset;
int encap_level;
+ __wsum csum;
__u16 csum_start;
};
#define SKB_SGO_CB_OFFSET 32
@@ -3576,6 +3613,16 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
return 0;
}
+static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
+{
+ /* Do not update partial checksums if remote checksum is enabled. */
+ if (skb->remcsum_offload)
+ return;
+
+ SKB_GSO_CB(skb)->csum = res;
+ SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
+}
+
/* Compute the checksum for a gso segment. First compute the checksum value
* from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
* then add in skb->csum (checksum from csum_start to end of packet).
@@ -3586,15 +3633,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
*/
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
- int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
- skb_transport_offset(skb);
- __wsum partial;
+ unsigned char *csum_start = skb_transport_header(skb);
+ int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
+ __wsum partial = SKB_GSO_CB(skb)->csum;
- partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
- skb->csum = res;
- SKB_GSO_CB(skb)->csum_start -= plen;
+ SKB_GSO_CB(skb)->csum = res;
+ SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
- return csum_fold(partial);
+ return csum_fold(csum_partial(csum_start, plen, partial));
}
static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3684,5 +3730,30 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
return hdr_len + skb_gso_transport_seglen(skb);
}
+/* Local Checksum Offload.
+ * Compute outer checksum based on the assumption that the
+ * inner checksum will be offloaded later.
+ * See Documentation/networking/checksum-offloads.txt for
+ * explanation of how this works.
+ * Fill in outer checksum adjustment (e.g. with sum of outer
+ * pseudo-header) before calling.
+ * Also ensure that inner checksum is in linear data area.
+ */
+static inline __wsum lco_csum(struct sk_buff *skb)
+{
+ unsigned char *csum_start = skb_checksum_start(skb);
+ unsigned char *l4_hdr = skb_transport_header(skb);
+ __wsum partial;
+
+ /* Start with complement of inner checksum adjustment */
+ partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
+ skb->csum_offset));
+
+ /* Add in checksum of our headers (incl. outer checksum
+ * adjustment filled in by caller) and return result.
+ */
+ return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
+}
+
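
To illustrate the calling convention spelled out in the comment above (pre-load the outer adjustment, then fold the result), here is a simplified sketch of how an encapsulating path might fill an outer UDP checksum. It is a hand-written illustration of the arithmetic, not a copy of the kernel's tunnel code.

#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/checksum.h>

/* Sketch: outer UDP header already built, inner checksum set up for
 * CHECKSUM_PARTIAL offload and contained in the linear area. */
static void example_fill_outer_udp_check(struct sk_buff *skb, struct udphdr *uh,
					 __be32 saddr, __be32 daddr, int len)
{
	/* Outer adjustment: the folded pseudo-header sum goes into the field
	 * that lco_csum() will sweep over. */
	uh->check = ~csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, 0);

	/* Fold outer headers plus the complement of the inner adjustment. */
	uh->check = csum_fold(lco_csum(skb));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;
}
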
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3627d5c1bc47..508bd827e6dc 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -20,7 +20,7 @@
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
-#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
+#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
@@ -92,6 +92,12 @@
# define SLAB_ACCOUNT 0x00000000UL
#endif
+#ifdef CONFIG_KASAN
+#define SLAB_KASAN 0x08000000UL
+#else
+#define SLAB_KASAN 0x00000000UL
+#endif
+
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
@@ -314,7 +320,7 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment
void kmem_cache_free(struct kmem_cache *, void *);
/*
- * Bulk allocation and freeing operations. These are accellerated in an
+ * Bulk allocation and freeing operations. These are accelerated in an
* allocator specific way to avoid taking locks repeatedly or building
* metadata structures unnecessarily.
*
@@ -323,6 +329,15 @@ void kmem_cache_free(struct kmem_cache *, void *);
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+/*
+ * Caller must not use kfree_bulk() on memory not originally allocated
+ * by kmalloc(), because the SLOB allocator cannot handle this.
+ */
+static __always_inline void kfree_bulk(size_t size, void **p)
+{
+ kmem_cache_free_bulk(NULL, size, p);
+}
+
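
Usage is a direct replacement for a loop of kfree() calls over kmalloc'ed objects (the names and sizes below are illustrative):

#include <linux/kernel.h>
#include <linux/slab.h>

static void example_batch_alloc_free(void)
{
	void *objs[8];
	size_t i, n = 0;

	for (i = 0; i < ARRAY_SIZE(objs); i++) {
		objs[n] = kmalloc(64, GFP_KERNEL);
		if (objs[n])
			n++;
	}

	/* One call instead of n individual kfree() invocations. */
	kfree_bulk(n, objs);
}
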
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
@@ -361,7 +376,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
{
void *ret = kmem_cache_alloc(s, flags);
- kasan_kmalloc(s, ret, size);
+ kasan_kmalloc(s, ret, size, flags);
return ret;
}
@@ -372,7 +387,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
void *ret = kmem_cache_alloc_node(s, gfpflags, node);
- kasan_kmalloc(s, ret, size);
+ kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
#endif /* CONFIG_TRACING */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cf139d3fa513..9edbbf352340 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -60,6 +60,9 @@ struct kmem_cache {
atomic_t allocmiss;
atomic_t freehit;
atomic_t freemiss;
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+ atomic_t store_user_clean;
+#endif
/*
* If debugging is enabled, then the allocator can add additional
@@ -73,8 +76,22 @@ struct kmem_cache {
#ifdef CONFIG_MEMCG
struct memcg_cache_params memcg_params;
#endif
+#ifdef CONFIG_KASAN
+ struct kasan_cache kasan_info;
+#endif
struct kmem_cache_node *node[MAX_NUMNODES];
};
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+ void *x) {
+ void *object = x - (x - page->s_mem) % cache->size;
+ void *last_object = page->s_mem + (cache->num - 1) * cache->size;
+
+ if (unlikely(object > last_object))
+ return last_object;
+ else
+ return object;
+}
+
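
The rounding this helper performs is plain modular arithmetic: an interior pointer is snapped back to the start of the object that contains it, then clamped to the last object in the page. A stand-alone illustration of just the arithmetic (addresses invented):

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x1000;		/* stand-in for page->s_mem */
	unsigned long size = 192;		/* cache->size */
	unsigned long x = base + 2 * size + 57;	/* points inside the third object */
	unsigned long object = x - (x - base) % size;

	printf("0x%lx\n", object);		/* prints 0x1180 == base + 2 * size */
	return 0;
}
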
#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index b7e57927f521..665cd0cd18b8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -81,6 +81,7 @@ struct kmem_cache {
int reserved; /* Reserved bytes at the end of slabs */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
+ int red_left_pad; /* Left redzone padding size */
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
#endif
@@ -129,4 +130,15 @@ static inline void *virt_to_obj(struct kmem_cache *s,
void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason);
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+ void *x) {
+ void *object = x - (x - page_address(page)) % cache->size;
+ void *last_object = page_address(page) +
+ (page->objects - 1) * cache->size;
+ if (unlikely(object > last_object))
+ return last_object;
+ else
+ return object;
+}
+
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
new file mode 100644
index 000000000000..e2e9de1acc5b
--- /dev/null
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Header for EXYNOS PMU Driver support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SOC_EXYNOS_PMU_H
+#define __LINUX_SOC_EXYNOS_PMU_H
+
+enum sys_powerdown {
+ SYS_AFTR,
+ SYS_LPA,
+ SYS_SLEEP,
+ NUM_SYS_POWERDOWN,
+};
+
+extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
+
+#endif /* __LINUX_SOC_EXYNOS_PMU_H */
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
new file mode 100644
index 000000000000..d30186e2b609
--- /dev/null
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -0,0 +1,693 @@
+/*
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - Power management unit definition
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __LINUX_SOC_EXYNOS_REGS_PMU_H
+#define __LINUX_SOC_EXYNOS_REGS_PMU_H __FILE__
+
+#define S5P_CENTRAL_SEQ_CONFIGURATION 0x0200
+
+#define S5P_CENTRAL_LOWPWR_CFG (1 << 16)
+
+#define S5P_CENTRAL_SEQ_OPTION 0x0208
+
+#define S5P_USE_STANDBY_WFI0 (1 << 16)
+#define S5P_USE_STANDBY_WFI1 (1 << 17)
+#define S5P_USE_STANDBY_WFI2 (1 << 19)
+#define S5P_USE_STANDBY_WFI3 (1 << 20)
+#define S5P_USE_STANDBY_WFE0 (1 << 24)
+#define S5P_USE_STANDBY_WFE1 (1 << 25)
+#define S5P_USE_STANDBY_WFE2 (1 << 27)
+#define S5P_USE_STANDBY_WFE3 (1 << 28)
+
+#define S5P_USE_STANDBY_WFI_ALL \
+ (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFI1 | \
+ S5P_USE_STANDBY_WFI2 | S5P_USE_STANDBY_WFI3 | \
+ S5P_USE_STANDBY_WFE0 | S5P_USE_STANDBY_WFE1 | \
+ S5P_USE_STANDBY_WFE2 | S5P_USE_STANDBY_WFE3)
+
+#define S5P_USE_DELAYED_RESET_ASSERTION BIT(12)
+
+#define EXYNOS_CORE_PO_RESET(n) ((1 << 4) << n)
+#define EXYNOS_WAKEUP_FROM_LOWPWR (1 << 28)
+#define EXYNOS_SWRESET 0x0400
+#define EXYNOS5440_SWRESET 0x00C4
+
+#define S5P_WAKEUP_STAT 0x0600
+#define S5P_EINT_WAKEUP_MASK 0x0604
+#define S5P_WAKEUP_MASK 0x0608
+#define S5P_WAKEUP_MASK2 0x0614
+
+#define S5P_INFORM0 0x0800
+#define S5P_INFORM1 0x0804
+#define S5P_INFORM5 0x0814
+#define S5P_INFORM6 0x0818
+#define S5P_INFORM7 0x081C
+#define S5P_PMU_SPARE2 0x0908
+#define S5P_PMU_SPARE3 0x090C
+
+#define EXYNOS_IROM_DATA2 0x0988
+#define S5P_ARM_CORE0_LOWPWR 0x1000
+#define S5P_DIS_IRQ_CORE0 0x1004
+#define S5P_DIS_IRQ_CENTRAL0 0x1008
+#define S5P_ARM_CORE1_LOWPWR 0x1010
+#define S5P_DIS_IRQ_CORE1 0x1014
+#define S5P_DIS_IRQ_CENTRAL1 0x1018
+#define S5P_ARM_COMMON_LOWPWR 0x1080
+#define S5P_L2_0_LOWPWR 0x10C0
+#define S5P_L2_1_LOWPWR 0x10C4
+#define S5P_CMU_ACLKSTOP_LOWPWR 0x1100
+#define S5P_CMU_SCLKSTOP_LOWPWR 0x1104
+#define S5P_CMU_RESET_LOWPWR 0x110C
+#define S5P_APLL_SYSCLK_LOWPWR 0x1120
+#define S5P_MPLL_SYSCLK_LOWPWR 0x1124
+#define S5P_VPLL_SYSCLK_LOWPWR 0x1128
+#define S5P_EPLL_SYSCLK_LOWPWR 0x112C
+#define S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR 0x1138
+#define S5P_CMU_RESET_GPSALIVE_LOWPWR 0x113C
+#define S5P_CMU_CLKSTOP_CAM_LOWPWR 0x1140
+#define S5P_CMU_CLKSTOP_TV_LOWPWR 0x1144
+#define S5P_CMU_CLKSTOP_MFC_LOWPWR 0x1148
+#define S5P_CMU_CLKSTOP_G3D_LOWPWR 0x114C
+#define S5P_CMU_CLKSTOP_LCD0_LOWPWR 0x1150
+#define S5P_CMU_CLKSTOP_MAUDIO_LOWPWR 0x1158
+#define S5P_CMU_CLKSTOP_GPS_LOWPWR 0x115C
+#define S5P_CMU_RESET_CAM_LOWPWR 0x1160
+#define S5P_CMU_RESET_TV_LOWPWR 0x1164
+#define S5P_CMU_RESET_MFC_LOWPWR 0x1168
+#define S5P_CMU_RESET_G3D_LOWPWR 0x116C
+#define S5P_CMU_RESET_LCD0_LOWPWR 0x1170
+#define S5P_CMU_RESET_MAUDIO_LOWPWR 0x1178
+#define S5P_CMU_RESET_GPS_LOWPWR 0x117C
+#define S5P_TOP_BUS_LOWPWR 0x1180
+#define S5P_TOP_RETENTION_LOWPWR 0x1184
+#define S5P_TOP_PWR_LOWPWR 0x1188
+#define S5P_LOGIC_RESET_LOWPWR 0x11A0
+#define S5P_ONENAND_MEM_LOWPWR 0x11C0
+#define S5P_G2D_ACP_MEM_LOWPWR 0x11C8
+#define S5P_USBOTG_MEM_LOWPWR 0x11CC
+#define S5P_HSMMC_MEM_LOWPWR 0x11D0
+#define S5P_CSSYS_MEM_LOWPWR 0x11D4
+#define S5P_SECSS_MEM_LOWPWR 0x11D8
+#define S5P_PAD_RETENTION_DRAM_LOWPWR 0x1200
+#define S5P_PAD_RETENTION_MAUDIO_LOWPWR 0x1204
+#define S5P_PAD_RETENTION_GPIO_LOWPWR 0x1220
+#define S5P_PAD_RETENTION_UART_LOWPWR 0x1224
+#define S5P_PAD_RETENTION_MMCA_LOWPWR 0x1228
+#define S5P_PAD_RETENTION_MMCB_LOWPWR 0x122C
+#define S5P_PAD_RETENTION_EBIA_LOWPWR 0x1230
+#define S5P_PAD_RETENTION_EBIB_LOWPWR 0x1234
+#define S5P_PAD_RETENTION_ISOLATION_LOWPWR 0x1240
+#define S5P_PAD_RETENTION_ALV_SEL_LOWPWR 0x1260
+#define S5P_XUSBXTI_LOWPWR 0x1280
+#define S5P_XXTI_LOWPWR 0x1284
+#define S5P_EXT_REGULATOR_LOWPWR 0x12C0
+#define S5P_GPIO_MODE_LOWPWR 0x1300
+#define S5P_GPIO_MODE_MAUDIO_LOWPWR 0x1340
+#define S5P_CAM_LOWPWR 0x1380
+#define S5P_TV_LOWPWR 0x1384
+#define S5P_MFC_LOWPWR 0x1388
+#define S5P_G3D_LOWPWR 0x138C
+#define S5P_LCD0_LOWPWR 0x1390
+#define S5P_MAUDIO_LOWPWR 0x1398
+#define S5P_GPS_LOWPWR 0x139C
+#define S5P_GPS_ALIVE_LOWPWR 0x13A0
+
+#define EXYNOS_ARM_CORE0_CONFIGURATION 0x2000
+#define EXYNOS_ARM_CORE_CONFIGURATION(_nr) \
+ (EXYNOS_ARM_CORE0_CONFIGURATION + (0x80 * (_nr)))
+#define EXYNOS_ARM_CORE_STATUS(_nr) \
+ (EXYNOS_ARM_CORE_CONFIGURATION(_nr) + 0x4)
+#define EXYNOS_ARM_CORE_OPTION(_nr) \
+ (EXYNOS_ARM_CORE_CONFIGURATION(_nr) + 0x8)
+
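
Each per-core register block is 0x80 bytes apart, so these macros reduce to simple arithmetic; for an illustrative core index of 2 they expand to:

	EXYNOS_ARM_CORE_CONFIGURATION(2) = 0x2000 + 0x80 * 2 = 0x2100
	EXYNOS_ARM_CORE_STATUS(2)        = 0x2100 + 0x4      = 0x2104
	EXYNOS_ARM_CORE_OPTION(2)        = 0x2100 + 0x8      = 0x2108
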
+#define EXYNOS_ARM_COMMON_CONFIGURATION 0x2500
+#define EXYNOS_COMMON_CONFIGURATION(_nr) \
+ (EXYNOS_ARM_COMMON_CONFIGURATION + (0x80 * (_nr)))
+#define EXYNOS_COMMON_STATUS(_nr) \
+ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x4)
+#define EXYNOS_COMMON_OPTION(_nr) \
+ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8)
+
+#define EXYNOS_CORE_LOCAL_PWR_EN 0x3
+
+#define EXYNOS_ARM_COMMON_STATUS 0x2504
+#define EXYNOS_COMMON_OPTION(_nr) \
+ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8)
+
+#define EXYNOS_ARM_L2_CONFIGURATION 0x2600
+#define EXYNOS_L2_CONFIGURATION(_nr) \
+ (EXYNOS_ARM_L2_CONFIGURATION + ((_nr) * 0x80))
+#define EXYNOS_L2_STATUS(_nr) \
+ (EXYNOS_L2_CONFIGURATION(_nr) + 0x4)
+#define EXYNOS_L2_OPTION(_nr) \
+ (EXYNOS_L2_CONFIGURATION(_nr) + 0x8)
+#define EXYNOS_L2_COMMON_PWR_EN 0x3
+
+#define EXYNOS_ARM_CORE_X_STATUS_OFFSET 0x4
+
+#define EXYNOS5_APLL_SYSCLK_CONFIGURATION 0x2A00
+#define EXYNOS5_APLL_SYSCLK_STATUS 0x2A04
+
+#define EXYNOS5_ARM_L2_OPTION 0x2608
+#define EXYNOS5_USE_RETENTION BIT(4)
+
+#define EXYNOS5_L2RSTDISABLE_VALUE BIT(3)
+
+#define S5P_PAD_RET_MAUDIO_OPTION 0x3028
+#define S5P_PAD_RET_MMC2_OPTION 0x30c8
+#define S5P_PAD_RET_GPIO_OPTION 0x3108
+#define S5P_PAD_RET_UART_OPTION 0x3128
+#define S5P_PAD_RET_MMCA_OPTION 0x3148
+#define S5P_PAD_RET_MMCB_OPTION 0x3168
+#define S5P_PAD_RET_EBIA_OPTION 0x3188
+#define S5P_PAD_RET_EBIB_OPTION 0x31A8
+#define S5P_PAD_RET_SPI_OPTION 0x31c8
+
+#define S5P_PS_HOLD_CONTROL 0x330C
+#define S5P_PS_HOLD_EN (1 << 31)
+#define S5P_PS_HOLD_OUTPUT_HIGH (3 << 8)
+
+#define S5P_CAM_OPTION 0x3C08
+#define S5P_MFC_OPTION 0x3C48
+#define S5P_G3D_OPTION 0x3C68
+#define S5P_LCD0_OPTION 0x3C88
+#define S5P_LCD1_OPTION 0x3CA8
+#define S5P_ISP_OPTION S5P_LCD1_OPTION
+
+#define S5P_CORE_LOCAL_PWR_EN 0x3
+#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8)
+#define S5P_CORE_AUTOWAKEUP_EN (1 << 31)
+
+/* Only for EXYNOS4210 */
+#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
+#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174
+#define S5P_MODIMIF_MEM_LOWPWR 0x11C4
+#define S5P_PCIE_MEM_LOWPWR 0x11E0
+#define S5P_SATA_MEM_LOWPWR 0x11E4
+#define S5P_LCD1_LOWPWR 0x1394
+
+/* Only for EXYNOS4x12 */
+#define S5P_ISP_ARM_LOWPWR 0x1050
+#define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR 0x1054
+#define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR 0x1058
+#define S5P_CMU_ACLKSTOP_COREBLK_LOWPWR 0x1110
+#define S5P_CMU_SCLKSTOP_COREBLK_LOWPWR 0x1114
+#define S5P_CMU_RESET_COREBLK_LOWPWR 0x111C
+#define S5P_MPLLUSER_SYSCLK_LOWPWR 0x1130
+#define S5P_CMU_CLKSTOP_ISP_LOWPWR 0x1154
+#define S5P_CMU_RESET_ISP_LOWPWR 0x1174
+#define S5P_TOP_BUS_COREBLK_LOWPWR 0x1190
+#define S5P_TOP_RETENTION_COREBLK_LOWPWR 0x1194
+#define S5P_TOP_PWR_COREBLK_LOWPWR 0x1198
+#define S5P_OSCCLK_GATE_LOWPWR 0x11A4
+#define S5P_LOGIC_RESET_COREBLK_LOWPWR 0x11B0
+#define S5P_OSCCLK_GATE_COREBLK_LOWPWR 0x11B4
+#define S5P_HSI_MEM_LOWPWR 0x11C4
+#define S5P_ROTATOR_MEM_LOWPWR 0x11DC
+#define S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR 0x123C
+#define S5P_PAD_ISOLATION_COREBLK_LOWPWR 0x1250
+#define S5P_GPIO_MODE_COREBLK_LOWPWR 0x1320
+#define S5P_TOP_ASB_RESET_LOWPWR 0x1344
+#define S5P_TOP_ASB_ISOLATION_LOWPWR 0x1348
+#define S5P_ISP_LOWPWR 0x1394
+#define S5P_DRAM_FREQ_DOWN_LOWPWR 0x13B0
+#define S5P_DDRPHY_DLLOFF_LOWPWR 0x13B4
+#define S5P_CMU_SYSCLK_ISP_LOWPWR 0x13B8
+#define S5P_CMU_SYSCLK_GPS_LOWPWR 0x13BC
+#define S5P_LPDDR_PHY_DLL_LOCK_LOWPWR 0x13C0
+
+#define S5P_ARM_L2_0_OPTION 0x2608
+#define S5P_ARM_L2_1_OPTION 0x2628
+#define S5P_ONENAND_MEM_OPTION 0x2E08
+#define S5P_HSI_MEM_OPTION 0x2E28
+#define S5P_G2D_ACP_MEM_OPTION 0x2E48
+#define S5P_USBOTG_MEM_OPTION 0x2E68
+#define S5P_HSMMC_MEM_OPTION 0x2E88
+#define S5P_CSSYS_MEM_OPTION 0x2EA8
+#define S5P_SECSS_MEM_OPTION 0x2EC8
+#define S5P_ROTATOR_MEM_OPTION 0x2F48
+
+/* Only for EXYNOS4412 */
+#define S5P_ARM_CORE2_LOWPWR 0x1020
+#define S5P_DIS_IRQ_CORE2 0x1024
+#define S5P_DIS_IRQ_CENTRAL2 0x1028
+#define S5P_ARM_CORE3_LOWPWR 0x1030
+#define S5P_DIS_IRQ_CORE3 0x1034
+#define S5P_DIS_IRQ_CENTRAL3 0x1038
+
+/* Only for EXYNOS3XXX */
+#define EXYNOS3_ARM_CORE0_SYS_PWR_REG 0x1000
+#define EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004
+#define EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008
+#define EXYNOS3_ARM_CORE1_SYS_PWR_REG 0x1010
+#define EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG 0x1014
+#define EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG 0x1018
+#define EXYNOS3_ISP_ARM_SYS_PWR_REG 0x1050
+#define EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1054
+#define EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1058
+#define EXYNOS3_ARM_COMMON_SYS_PWR_REG 0x1080
+#define EXYNOS3_ARM_L2_SYS_PWR_REG 0x10C0
+#define EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG 0x1100
+#define EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG 0x1104
+#define EXYNOS3_CMU_RESET_SYS_PWR_REG 0x110C
+#define EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG 0x1110
+#define EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG 0x1114
+#define EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG 0x111C
+#define EXYNOS3_APLL_SYSCLK_SYS_PWR_REG 0x1120
+#define EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG 0x1124
+#define EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG 0x1128
+#define EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG 0x112C
+#define EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG 0x1130
+#define EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG 0x1134
+#define EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG 0x1138
+#define EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG 0x1140
+#define EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1148
+#define EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG 0x114C
+#define EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG 0x1150
+#define EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG 0x1154
+#define EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG 0x1158
+#define EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG 0x1160
+#define EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG 0x1168
+#define EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG 0x116C
+#define EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG 0x1170
+#define EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG 0x1174
+#define EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG 0x1178
+#define EXYNOS3_TOP_BUS_SYS_PWR_REG 0x1180
+#define EXYNOS3_TOP_RETENTION_SYS_PWR_REG 0x1184
+#define EXYNOS3_TOP_PWR_SYS_PWR_REG 0x1188
+#define EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG 0x1190
+#define EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG 0x1194
+#define EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG 0x1198
+#define EXYNOS3_LOGIC_RESET_SYS_PWR_REG 0x11A0
+#define EXYNOS3_OSCCLK_GATE_SYS_PWR_REG 0x11A4
+#define EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG 0x11B0
+#define EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG 0x11B4
+#define EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200
+#define EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG 0x1204
+#define EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG 0x1208
+#define EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG 0x1218
+#define EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220
+#define EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG 0x1224
+#define EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG 0x1228
+#define EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG 0x122C
+#define EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230
+#define EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234
+#define EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1238
+#define EXYNOS3_PAD_ISOLATION_SYS_PWR_REG 0x1240
+#define EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG 0x1260
+#define EXYNOS3_XUSBXTI_SYS_PWR_REG 0x1280
+#define EXYNOS3_XXTI_SYS_PWR_REG 0x1284
+#define EXYNOS3_EXT_REGULATOR_SYS_PWR_REG 0x12C0
+#define EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG 0x12C4
+#define EXYNOS3_GPIO_MODE_SYS_PWR_REG 0x1300
+#define EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG 0x1340
+#define EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG 0x1344
+#define EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG 0x1348
+#define EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG 0x1350
+#define EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG 0x1354
+#define EXYNOS3_CAM_SYS_PWR_REG 0x1380
+#define EXYNOS3_MFC_SYS_PWR_REG 0x1388
+#define EXYNOS3_G3D_SYS_PWR_REG 0x138C
+#define EXYNOS3_LCD0_SYS_PWR_REG 0x1390
+#define EXYNOS3_ISP_SYS_PWR_REG 0x1394
+#define EXYNOS3_MAUDIO_SYS_PWR_REG 0x1398
+#define EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG 0x13B0
+#define EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG 0x13B4
+#define EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG 0x13B8
+#define EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG 0x13C0
+#define EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG 0x13C4
+#define EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG 0x13C8
+
+#define EXYNOS3_ARM_CORE0_OPTION 0x2008
+#define EXYNOS3_ARM_CORE_OPTION(_nr) \
+ (EXYNOS3_ARM_CORE0_OPTION + ((_nr) * 0x80))
+
+#define EXYNOS3_ARM_COMMON_OPTION 0x2408
+#define EXYNOS3_ARM_L2_OPTION 0x2608
+#define EXYNOS3_TOP_PWR_OPTION 0x2C48
+#define EXYNOS3_CORE_TOP_PWR_OPTION 0x2CA8
+#define EXYNOS3_XUSBXTI_DURATION 0x341C
+#define EXYNOS3_XXTI_DURATION 0x343C
+#define EXYNOS3_EXT_REGULATOR_DURATION 0x361C
+#define EXYNOS3_EXT_REGULATOR_COREBLK_DURATION 0x363C
+#define XUSBXTI_DURATION 0x00000BB8
+#define XXTI_DURATION XUSBXTI_DURATION
+#define EXT_REGULATOR_DURATION 0x00001D4C
+#define EXT_REGULATOR_COREBLK_DURATION EXT_REGULATOR_DURATION
+
+/* for XXX_OPTION */
+#define EXYNOS3_OPTION_USE_SC_COUNTER (1 << 0)
+#define EXYNOS3_OPTION_USE_SC_FEEDBACK (1 << 1)
+#define EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7)
+
+/* For EXYNOS5 */
+
+#define EXYNOS5_AUTO_WDTRESET_DISABLE 0x0408
+#define EXYNOS5_MASK_WDTRESET_REQUEST 0x040C
+
+#define EXYNOS5_USE_RETENTION BIT(4)
+#define EXYNOS5_SYS_WDTRESET (1 << 20)
+
+#define EXYNOS5_ARM_CORE0_SYS_PWR_REG 0x1000
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008
+#define EXYNOS5_ARM_CORE1_SYS_PWR_REG 0x1010
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG 0x1014
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG 0x1018
+#define EXYNOS5_FSYS_ARM_SYS_PWR_REG 0x1040
+#define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG 0x1048
+#define EXYNOS5_ISP_ARM_SYS_PWR_REG 0x1050
+#define EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1054
+#define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1058
+#define EXYNOS5_ARM_COMMON_SYS_PWR_REG 0x1080
+#define EXYNOS5_ARM_L2_SYS_PWR_REG 0x10C0
+#define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG 0x1100
+#define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG 0x1104
+#define EXYNOS5_CMU_RESET_SYS_PWR_REG 0x110C
+#define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG 0x1120
+#define EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG 0x1124
+#define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG 0x112C
+#define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG 0x1130
+#define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG 0x1134
+#define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG 0x1138
+#define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG 0x1140
+#define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG 0x1144
+#define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG 0x1148
+#define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG 0x114C
+#define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG 0x1150
+#define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG 0x1154
+#define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG 0x1164
+#define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG 0x1170
+#define EXYNOS5_TOP_BUS_SYS_PWR_REG 0x1180
+#define EXYNOS5_TOP_RETENTION_SYS_PWR_REG 0x1184
+#define EXYNOS5_TOP_PWR_SYS_PWR_REG 0x1188
+#define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG 0x1190
+#define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG 0x1194
+#define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG 0x1198
+#define EXYNOS5_LOGIC_RESET_SYS_PWR_REG 0x11A0
+#define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG 0x11A4
+#define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG 0x11B0
+#define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG 0x11B4
+#define EXYNOS5_USBOTG_MEM_SYS_PWR_REG 0x11C0
+#define EXYNOS5_G2D_MEM_SYS_PWR_REG 0x11C8
+#define EXYNOS5_USBDRD_MEM_SYS_PWR_REG 0x11CC
+#define EXYNOS5_SDMMC_MEM_SYS_PWR_REG 0x11D0
+#define EXYNOS5_CSSYS_MEM_SYS_PWR_REG 0x11D4
+#define EXYNOS5_SECSS_MEM_SYS_PWR_REG 0x11D8
+#define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG 0x11DC
+#define EXYNOS5_INTRAM_MEM_SYS_PWR_REG 0x11E0
+#define EXYNOS5_INTROM_MEM_SYS_PWR_REG 0x11E4
+#define EXYNOS5_JPEG_MEM_SYS_PWR_REG 0x11E8
+#define EXYNOS5_HSI_MEM_SYS_PWR_REG 0x11EC
+#define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG 0x11F4
+#define EXYNOS5_SATA_MEM_SYS_PWR_REG 0x11FC
+#define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200
+#define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG 0x1204
+#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG 0x1208
+#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220
+#define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG 0x1224
+#define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG 0x1228
+#define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG 0x122C
+#define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230
+#define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234
+#define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG 0x1238
+#define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG 0x123C
+#define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG 0x1240
+#define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG 0x1250
+#define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG 0x1260
+#define EXYNOS5_XUSBXTI_SYS_PWR_REG 0x1280
+#define EXYNOS5_XXTI_SYS_PWR_REG 0x1284
+#define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG 0x12C0
+#define EXYNOS5_GPIO_MODE_SYS_PWR_REG 0x1300
+#define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG 0x1320
+#define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG 0x1340
+#define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG 0x1344
+#define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG 0x1348
+#define EXYNOS5_GSCL_SYS_PWR_REG 0x1400
+#define EXYNOS5_ISP_SYS_PWR_REG 0x1404
+#define EXYNOS5_MFC_SYS_PWR_REG 0x1408
+#define EXYNOS5_G3D_SYS_PWR_REG 0x140C
+#define EXYNOS5_DISP1_SYS_PWR_REG 0x1414
+#define EXYNOS5_MAU_SYS_PWR_REG 0x1418
+#define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG 0x1480
+#define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG 0x1484
+#define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1488
+#define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG 0x148C
+#define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG 0x1494
+#define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG 0x1498
+#define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG 0x14C0
+#define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG 0x14C4
+#define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG 0x14C8
+#define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG 0x14CC
+#define EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG 0x14D4
+#define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG 0x14D8
+#define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG 0x1580
+#define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG 0x1584
+#define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG 0x1588
+#define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG 0x158C
+#define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG 0x1594
+#define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG 0x1598
+
+#define EXYNOS5_ARM_CORE0_OPTION 0x2008
+#define EXYNOS5_ARM_CORE1_OPTION 0x2088
+#define EXYNOS5_FSYS_ARM_OPTION 0x2208
+#define EXYNOS5_ISP_ARM_OPTION 0x2288
+#define EXYNOS5_ARM_COMMON_OPTION 0x2408
+#define EXYNOS5_ARM_L2_OPTION 0x2608
+#define EXYNOS5_TOP_PWR_OPTION 0x2C48
+#define EXYNOS5_TOP_PWR_SYSMEM_OPTION 0x2CC8
+#define EXYNOS5_JPEG_MEM_OPTION 0x2F48
+#define EXYNOS5_GSCL_OPTION 0x4008
+#define EXYNOS5_ISP_OPTION 0x4028
+#define EXYNOS5_MFC_OPTION 0x4048
+#define EXYNOS5_G3D_OPTION 0x4068
+#define EXYNOS5_DISP1_OPTION 0x40A8
+#define EXYNOS5_MAU_OPTION 0x40C8
+
+#define EXYNOS5_USE_SC_FEEDBACK (1 << 1)
+#define EXYNOS5_USE_SC_COUNTER (1 << 0)
+
+#define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7)
+
+#define EXYNOS5_OPTION_USE_STANDBYWFE (1 << 24)
+#define EXYNOS5_OPTION_USE_STANDBYWFI (1 << 16)
+
+#define EXYNOS5_OPTION_USE_RETENTION (1 << 4)
+
+#define EXYNOS5420_SWRESET_KFC_SEL 0x3
+
+/* Only for EXYNOS5420 */
+#define EXYNOS5420_ISP_ARM_OPTION 0x2488
+#define EXYNOS5420_L2RSTDISABLE_VALUE BIT(3)
+
+#define EXYNOS5420_LPI_MASK 0x0004
+#define EXYNOS5420_LPI_MASK1 0x0008
+#define EXYNOS5420_UFS BIT(8)
+#define EXYNOS5420_ATB_KFC BIT(13)
+#define EXYNOS5420_ATB_ISP_ARM BIT(19)
+#define EXYNOS5420_EMULATION BIT(31)
+#define ATB_ISP_ARM BIT(12)
+#define ATB_KFC BIT(13)
+#define ATB_NOC BIT(14)
+
+#define EXYNOS5420_ARM_INTR_SPREAD_ENABLE 0x0100
+#define EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI 0x0104
+#define EXYNOS5420_UP_SCHEDULER 0x0120
+#define SPREAD_ENABLE 0xF
+#define SPREAD_USE_STANDWFI 0xF
+
+#define EXYNOS5420_KFC_CORE_RESET0 BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0 BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr) \
+ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
+#define EXYNOS5420_BB_CON1 0x0784
+#define EXYNOS5420_BB_SEL_EN BIT(31)
+#define EXYNOS5420_BB_PMOS_EN BIT(7)
+#define EXYNOS5420_BB_1300X 0XF
+
+#define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020
+#define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024
+#define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028
+#define EXYNOS5420_ARM_CORE3_SYS_PWR_REG 0x1030
+#define EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG 0x1034
+#define EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG 0x1038
+#define EXYNOS5420_KFC_CORE0_SYS_PWR_REG 0x1040
+#define EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG 0x1044
+#define EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG 0x1048
+#define EXYNOS5420_KFC_CORE1_SYS_PWR_REG 0x1050
+#define EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG 0x1054
+#define EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG 0x1058
+#define EXYNOS5420_KFC_CORE2_SYS_PWR_REG 0x1060
+#define EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG 0x1064
+#define EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG 0x1068
+#define EXYNOS5420_KFC_CORE3_SYS_PWR_REG 0x1070
+#define EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG 0x1074
+#define EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG 0x1078
+#define EXYNOS5420_ISP_ARM_SYS_PWR_REG 0x1090
+#define EXYNOS5420_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1094
+#define EXYNOS5420_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1098
+#define EXYNOS5420_ARM_COMMON_SYS_PWR_REG 0x10A0
+#define EXYNOS5420_KFC_COMMON_SYS_PWR_REG 0x10B0
+#define EXYNOS5420_KFC_L2_SYS_PWR_REG 0x10D0
+#define EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG 0x1158
+#define EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG 0x115C
+#define EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG 0x1160
+#define EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG 0x1174
+#define EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG 0x1178
+#define EXYNOS5420_INTRAM_MEM_SYS_PWR_REG 0x11B8
+#define EXYNOS5420_INTROM_MEM_SYS_PWR_REG 0x11BC
+#define EXYNOS5420_ONENANDXL_MEM_SYS_PWR 0x11C0
+#define EXYNOS5420_USBDEV_MEM_SYS_PWR 0x11CC
+#define EXYNOS5420_USBDEV1_MEM_SYS_PWR 0x11D0
+#define EXYNOS5420_SDMMC_MEM_SYS_PWR 0x11D4
+#define EXYNOS5420_CSSYS_MEM_SYS_PWR 0x11D8
+#define EXYNOS5420_SECSS_MEM_SYS_PWR 0x11DC
+#define EXYNOS5420_ROTATOR_MEM_SYS_PWR 0x11E0
+#define EXYNOS5420_INTRAM_MEM_SYS_PWR 0x11E4
+#define EXYNOS5420_INTROM_MEM_SYS_PWR 0x11E8
+#define EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1208
+#define EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1210
+#define EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG 0x1214
+#define EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG 0x1218
+#define EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG 0x121C
+#define EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG 0x1220
+#define EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG 0x1224
+#define EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1228
+#define EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG 0x122C
+#define EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG 0x1230
+#define EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG 0x1234
+#define EXYNOS5420_DISP1_SYS_PWR_REG 0x1410
+#define EXYNOS5420_MAU_SYS_PWR_REG 0x1414
+#define EXYNOS5420_G2D_SYS_PWR_REG 0x1418
+#define EXYNOS5420_MSC_SYS_PWR_REG 0x141C
+#define EXYNOS5420_FSYS_SYS_PWR_REG 0x1420
+#define EXYNOS5420_FSYS2_SYS_PWR_REG 0x1424
+#define EXYNOS5420_PSGEN_SYS_PWR_REG 0x1428
+#define EXYNOS5420_PERIC_SYS_PWR_REG 0x142C
+#define EXYNOS5420_WCORE_SYS_PWR_REG 0x1430
+#define EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG 0x1490
+#define EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG 0x1494
+#define EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG 0x1498
+#define EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG 0x149C
+#define EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG 0x14A0
+#define EXYNOS5420_CMU_CLKSTOP_FSYS2_SYS_PWR_REG 0x14A4
+#define EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG 0x14A8
+#define EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG 0x14AC
+#define EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG 0x14B0
+#define EXYNOS5420_CMU_SYSCLK_TOPPWR_SYS_PWR_REG 0x14BC
+#define EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG 0x14D0
+#define EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG 0x14D4
+#define EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG 0x14D8
+#define EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG 0x14DC
+#define EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG 0x14E0
+#define EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG 0x14E4
+#define EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG 0x14E8
+#define EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG 0x14EC
+#define EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG 0x14F0
+#define EXYNOS5420_CMU_SYSCLK_SYSMEM_TOPPWR_SYS_PWR_REG 0x14F4
+#define EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG 0x1570
+#define EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG 0x1574
+#define EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG 0x1578
+#define EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG 0x157C
+#define EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG 0x1590
+#define EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG 0x1594
+#define EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG 0x1598
+#define EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG 0x159C
+#define EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG 0x15A0
+#define EXYNOS5420_SFR_AXI_CGDIS1 0x15E4
+#define EXYNOS_ARM_CORE2_CONFIGURATION 0x2100
+#define EXYNOS5420_ARM_CORE2_OPTION 0x2108
+#define EXYNOS_ARM_CORE3_CONFIGURATION 0x2180
+#define EXYNOS5420_ARM_CORE3_OPTION 0x2188
+#define EXYNOS5420_ARM_COMMON_STATUS 0x2504
+#define EXYNOS5420_ARM_COMMON_OPTION 0x2508
+#define EXYNOS5420_KFC_COMMON_STATUS 0x2584
+#define EXYNOS5420_KFC_COMMON_OPTION 0x2588
+#define EXYNOS5420_LOGIC_RESET_DURATION3 0x2D1C
+
+#define EXYNOS5420_PAD_RET_GPIO_OPTION 0x30C8
+#define EXYNOS5420_PAD_RET_UART_OPTION 0x30E8
+#define EXYNOS5420_PAD_RET_MMCA_OPTION 0x3108
+#define EXYNOS5420_PAD_RET_MMCB_OPTION 0x3128
+#define EXYNOS5420_PAD_RET_MMCC_OPTION 0x3148
+#define EXYNOS5420_PAD_RET_HSI_OPTION 0x3168
+#define EXYNOS5420_PAD_RET_SPI_OPTION 0x31C8
+#define EXYNOS5420_PAD_RET_DRAM_COREBLK_OPTION 0x31E8
+#define EXYNOS_PAD_RET_DRAM_OPTION 0x3008
+#define EXYNOS_PAD_RET_MAUDIO_OPTION 0x3028
+#define EXYNOS_PAD_RET_JTAG_OPTION 0x3048
+#define EXYNOS_PAD_RET_GPIO_OPTION 0x3108
+#define EXYNOS_PAD_RET_UART_OPTION 0x3128
+#define EXYNOS_PAD_RET_MMCA_OPTION 0x3148
+#define EXYNOS_PAD_RET_MMCB_OPTION 0x3168
+#define EXYNOS_PAD_RET_EBIA_OPTION 0x3188
+#define EXYNOS_PAD_RET_EBIB_OPTION 0x31A8
+
+#define EXYNOS_PS_HOLD_CONTROL 0x330C
+
+/* For SYS_PWR_REG */
+#define EXYNOS_SYS_PWR_CFG BIT(0)
+
+#define EXYNOS5420_MFC_CONFIGURATION 0x4060
+#define EXYNOS5420_MFC_STATUS 0x4064
+#define EXYNOS5420_MFC_OPTION 0x4068
+#define EXYNOS5420_G3D_CONFIGURATION 0x4080
+#define EXYNOS5420_G3D_STATUS 0x4084
+#define EXYNOS5420_G3D_OPTION 0x4088
+#define EXYNOS5420_DISP0_CONFIGURATION 0x40A0
+#define EXYNOS5420_DISP0_STATUS 0x40A4
+#define EXYNOS5420_DISP0_OPTION 0x40A8
+#define EXYNOS5420_DISP1_CONFIGURATION 0x40C0
+#define EXYNOS5420_DISP1_STATUS 0x40C4
+#define EXYNOS5420_DISP1_OPTION 0x40C8
+#define EXYNOS5420_MAU_CONFIGURATION 0x40E0
+#define EXYNOS5420_MAU_STATUS 0x40E4
+#define EXYNOS5420_MAU_OPTION 0x40E8
+#define EXYNOS5420_FSYS2_OPTION 0x4168
+#define EXYNOS5420_PSGEN_OPTION 0x4188
+
+/* For EXYNOS_CENTRAL_SEQ_OPTION */
+#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16)
+#define EXYNOS5_USE_STANDBYWFI_ARM_CORE1 BIT(17)
+#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24)
+#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25)
+
+#define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4)
+#define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5)
+#define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6)
+#define EXYNOS5420_ARM_USE_STANDBY_WFI3 BIT(7)
+#define EXYNOS5420_KFC_USE_STANDBY_WFI0 BIT(8)
+#define EXYNOS5420_KFC_USE_STANDBY_WFI1 BIT(9)
+#define EXYNOS5420_KFC_USE_STANDBY_WFI2 BIT(10)
+#define EXYNOS5420_KFC_USE_STANDBY_WFI3 BIT(11)
+#define EXYNOS5420_ARM_USE_STANDBY_WFE0 BIT(16)
+#define EXYNOS5420_ARM_USE_STANDBY_WFE1 BIT(17)
+#define EXYNOS5420_ARM_USE_STANDBY_WFE2 BIT(18)
+#define EXYNOS5420_ARM_USE_STANDBY_WFE3 BIT(19)
+#define EXYNOS5420_KFC_USE_STANDBY_WFE0 BIT(20)
+#define EXYNOS5420_KFC_USE_STANDBY_WFE1 BIT(21)
+#define EXYNOS5420_KFC_USE_STANDBY_WFE2 BIT(22)
+#define EXYNOS5420_KFC_USE_STANDBY_WFE3 BIT(23)
+
+#define DUR_WAIT_RESET 0xF
+
+#define EXYNOS5420_USE_STANDBY_WFI_ALL (EXYNOS5420_ARM_USE_STANDBY_WFI0 \
+ | EXYNOS5420_ARM_USE_STANDBY_WFI1 \
+ | EXYNOS5420_ARM_USE_STANDBY_WFI2 \
+ | EXYNOS5420_ARM_USE_STANDBY_WFI3 \
+ | EXYNOS5420_KFC_USE_STANDBY_WFI0 \
+ | EXYNOS5420_KFC_USE_STANDBY_WFI1 \
+ | EXYNOS5420_KFC_USE_STANDBY_WFI2 \
+ | EXYNOS5420_KFC_USE_STANDBY_WFI3)
+
+#endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */
diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h
new file mode 100644
index 000000000000..eac8e0c6fe11
--- /dev/null
+++ b/include/linux/soc/ti/ti-msgmgr.h
@@ -0,0 +1,35 @@
+/*
+ * Texas Instruments' Message Manager
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TI_MSGMGR_H
+#define TI_MSGMGR_H
+
+/**
+ * struct ti_msgmgr_message - Message Manager structure
+ * @len: Length of data in the Buffer
+ * @buf: Buffer pointer
+ *
+ * This is the structure for data used in mbox_send_message;
+ * the length of the data buffer depends on the SoC integration
+ * parameters - each message may be 64 or 128 bytes long, depending
+ * on the SoC. The client is expected to be aware of this.
+ */
+struct ti_msgmgr_message {
+ size_t len;
+ u8 *buf;
+};
+
+#endif /* TI_MSGMGR_H */
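
A minimal usage sketch (not part of the patch): how a mailbox client might wrap this structure around a payload. The helper name and channel handling are illustrative; mbox_send_message() is the standard mailbox client call.

#include <linux/mailbox_client.h>
#include <linux/soc/ti/ti-msgmgr.h>

/* Hypothetical helper: send a small payload on an already-requested channel. */
static int example_msgmgr_send(struct mbox_chan *chan, void *data, size_t len)
{
	struct ti_msgmgr_message msg = {
		.len = len,	/* must fit the SoC's message slot (64 or 128 bytes) */
		.buf = data,
	};

	/* Queues the message; completion is reported via the client callbacks. */
	return mbox_send_message(chan, &msg);
}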
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 5bf59c8493b7..73bf6c6a833b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -200,7 +200,9 @@ struct ucred {
#define AF_ALG 38 /* Algorithm sockets */
#define AF_NFC 39 /* NFC sockets */
#define AF_VSOCK 40 /* vSockets */
-#define AF_MAX 41 /* For now.. */
+#define AF_KCM 41 /* Kernel Connection Multiplexor*/
+
+#define AF_MAX 42 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -246,6 +248,7 @@ struct ucred {
#define PF_ALG AF_ALG
#define PF_NFC AF_NFC
#define PF_VSOCK AF_VSOCK
+#define PF_KCM AF_KCM
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@@ -274,6 +277,7 @@ struct ucred {
#define MSG_MORE 0x8000 /* Sender will send more */
#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
+#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
@@ -322,6 +326,7 @@ struct ucred {
#define SOL_CAIF 278
#define SOL_ALG 279
#define SOL_NFC 280
+#define SOL_KCM 281
/* IPX options */
#define IPX_TYPE 1
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h
index 403e007aef68..e34e169f9dcb 100644
--- a/include/linux/spi/eeprom.h
+++ b/include/linux/spi/eeprom.h
@@ -30,8 +30,6 @@ struct spi_eeprom {
*/
#define EE_INSTR_BIT3_IS_ADDR 0x0010
- /* for exporting this chip's data to other kernel code */
- void (*setup)(struct memory_accessor *mem, void *context);
void *context;
};
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 53be3a4c60cb..857a9a1d82b5 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -25,6 +25,7 @@
struct dma_chan;
struct spi_master;
struct spi_transfer;
+struct spi_flash_read_message;
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -53,6 +54,10 @@ extern struct bus_type spi_bus_type;
*
* @transfer_bytes_histo:
* transfer bytes histogram
+ *
+ * @transfers_split_maxsize:
+ * number of transfers that have been split because of
+ * maxsize limit
*/
struct spi_statistics {
spinlock_t lock; /* lock for the whole structure */
@@ -72,6 +77,8 @@ struct spi_statistics {
#define SPI_STATISTICS_HISTO_SIZE 17
unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
+
+ unsigned long transfers_split_maxsize;
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
@@ -303,6 +310,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @min_speed_hz: Lowest supported transfer speed
* @max_speed_hz: Highest supported transfer speed
* @flags: other constraints relevant to this driver
+ * @max_transfer_size: function that returns the max transfer size for
+ * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for SPI bus locking
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -361,6 +370,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
* @unprepare_message: undo any work done by prepare_message().
+ * @spi_flash_read: to support SPI controller hardware that provides an
+ * accelerated interface to read from flash devices.
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
@@ -369,6 +380,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @dma_rx: DMA receive channel
* @dummy_rx: dummy receive buffer for full-duplex devices
* @dummy_tx: dummy transmit buffer for full-duplex devices
+ * @fw_translate_cs: If the boot firmware uses a different numbering scheme
+ * than what Linux expects, this optional hook can be used to translate
+ * between the two.
*
* Each SPI master controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -513,6 +527,8 @@ struct spi_master {
struct spi_message *message);
int (*unprepare_message)(struct spi_master *master,
struct spi_message *message);
+ int (*spi_flash_read)(struct spi_device *spi,
+ struct spi_flash_read_message *msg);
/*
* These hooks are for drivers that use a generic implementation
@@ -537,6 +553,8 @@ struct spi_master {
/* dummy data for full duplex devices */
void *dummy_rx;
void *dummy_tx;
+
+ int (*fw_translate_cs)(struct spi_master *master, unsigned cs);
};
static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -582,6 +600,38 @@ extern void spi_unregister_master(struct spi_master *master);
extern struct spi_master *spi_busnum_to_master(u16 busnum);
+/*
+ * SPI resource management while processing a SPI message
+ */
+
+typedef void (*spi_res_release_t)(struct spi_master *master,
+ struct spi_message *msg,
+ void *res);
+
+/**
+ * struct spi_res - spi resource management structure
+ * @entry: list entry
+ * @release: release code called prior to freeing this resource
+ * @data: extra data allocated for the specific use-case
+ *
+ * This is based on ideas from devres, but focused on life-cycle
+ * management during spi_message processing.
+ */
+struct spi_res {
+ struct list_head entry;
+ spi_res_release_t release;
+ unsigned long long data[]; /* guarantee ull alignment */
+};
+
+extern void *spi_res_alloc(struct spi_device *spi,
+ spi_res_release_t release,
+ size_t size, gfp_t gfp);
+extern void spi_res_add(struct spi_message *message, void *res);
+extern void spi_res_free(void *res);
+
+extern void spi_res_release(struct spi_master *master,
+ struct spi_message *message);
+
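
As an illustration of the life-cycle described above (assumed usage, not taken from this patch): a helper could allocate a scratch area as a spi_res and attach it to a message so the core frees it automatically once the message has been processed. The helper and release function names are illustrative.

#include <linux/spi/spi.h>

/* Sketch only: nothing extra to undo here; the core frees the spi_res itself. */
static void example_release(struct spi_master *master,
			    struct spi_message *msg, void *res)
{
}

static void *example_attach_scratch(struct spi_device *spi,
				    struct spi_message *msg, size_t len)
{
	void *scratch = spi_res_alloc(spi, example_release, len, GFP_KERNEL);

	if (!scratch)
		return NULL;

	spi_res_add(msg, scratch);	/* freed via spi_res_release() later */
	return scratch;
}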
/*---------------------------------------------------------------------------*/
/*
@@ -720,6 +770,7 @@ struct spi_transfer {
* @status: zero for success, else negative errno
* @queue: for use by whichever driver currently owns the message
* @state: for use by whichever driver currently owns the message
+ * @resources: for resource management when the spi message is processed
*
* A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
@@ -766,11 +817,15 @@ struct spi_message {
*/
struct list_head queue;
void *state;
+
+	/* list of spi_res resources when the spi message is processed */
+ struct list_head resources;
};
static inline void spi_message_init_no_memset(struct spi_message *m)
{
INIT_LIST_HEAD(&m->transfers);
+ INIT_LIST_HEAD(&m->resources);
}
static inline void spi_message_init(struct spi_message *m)
@@ -854,6 +909,60 @@ spi_max_transfer_size(struct spi_device *spi)
/*---------------------------------------------------------------------------*/
+/* SPI transfer replacement methods which make use of spi_res */
+
+struct spi_replaced_transfers;
+typedef void (*spi_replaced_release_t)(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_replaced_transfers *res);
+/**
+ * struct spi_replaced_transfers - structure describing the spi_transfer
+ * replacements that have occurred
+ * so that they can get reverted
+ * @release: some extra release code to get executed prior to
+ * releasing this structure
+ * @extradata: pointer to some extra data if requested or NULL
+ * @replaced_transfers: transfers that have been replaced and which need
+ * to get restored
+ * @replaced_after: the transfer after which the @replaced_transfers
+ * are to get re-inserted
+ * @inserted: number of transfers inserted
+ * @inserted_transfers: array of spi_transfers of array-size @inserted,
+ * that have replaced the @replaced_transfers
+ *
+ * Note that @extradata will point to @inserted_transfers[@inserted]
+ * if some extra allocation is requested, so alignment will be the same
+ * as for spi_transfers
+ */
+struct spi_replaced_transfers {
+ spi_replaced_release_t release;
+ void *extradata;
+ struct list_head replaced_transfers;
+ struct list_head *replaced_after;
+ size_t inserted;
+ struct spi_transfer inserted_transfers[];
+};
+
+extern struct spi_replaced_transfers *spi_replace_transfers(
+ struct spi_message *msg,
+ struct spi_transfer *xfer_first,
+ size_t remove,
+ size_t insert,
+ spi_replaced_release_t release,
+ size_t extradatasize,
+ gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
+/* SPI transfer transformation methods */
+
+extern int spi_split_transfers_maxsize(struct spi_master *master,
+ struct spi_message *msg,
+ size_t maxsize,
+ gfp_t gfp);
+
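
A hedged sketch of how a controller driver with a hardware transfer-length limit might call the new helper from its prepare_message() hook; the 256-byte limit is illustrative, not something this header mandates.

#include <linux/spi/spi.h>

static int example_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	/* split any transfer larger than the controller's 256-byte FIFO */
	return spi_split_transfers_maxsize(master, msg, 256, GFP_KERNEL);
}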
+/*---------------------------------------------------------------------------*/
+
/* All these synchronous SPI transfer routines are utilities layered
* over the core async transfer primitive. Here, "synchronous" means
* they will sleep uninterruptibly until the async transfer completes.
@@ -1019,6 +1128,42 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
return be16_to_cpu(result);
}
+/**
+ * struct spi_flash_read_message - flash specific information for
+ * spi-masters that provide accelerated flash read interfaces
+ * @buf: buffer to read data
+ * @from: offset within the flash from where data is to be read
+ * @len: length of data to be read
+ * @retlen: actual length of data read
+ * @read_opcode: read_opcode to be used to communicate with flash
+ * @addr_width: number of address bytes
+ * @dummy_bytes: number of dummy bytes
+ * @opcode_nbits: number of lines to send opcode
+ * @addr_nbits: number of lines to send address
+ * @data_nbits: number of lines for data
+ */
+struct spi_flash_read_message {
+ void *buf;
+ loff_t from;
+ size_t len;
+ size_t retlen;
+ u8 read_opcode;
+ u8 addr_width;
+ u8 dummy_bytes;
+ u8 opcode_nbits;
+ u8 addr_nbits;
+ u8 data_nbits;
+};
+
+/* SPI core interface for flash read support */
+static inline bool spi_flash_read_supported(struct spi_device *spi)
+{
+ return spi->master->spi_flash_read ? true : false;
+}
+
+int spi_flash_read(struct spi_device *spi,
+ struct spi_flash_read_message *msg);
+
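
A sketch of how a flash driver might prefer the accelerated path when the controller offers one (the opcode and bus-width values are illustrative; a real driver derives them from the flash device):

#include <linux/spi/spi.h>

static ssize_t example_flash_read(struct spi_device *spi, loff_t from,
				  size_t len, void *buf)
{
	struct spi_flash_read_message msg = {
		.buf		= buf,
		.from		= from,
		.len		= len,
		.read_opcode	= 0x03,	/* plain READ, single I/O */
		.addr_width	= 3,
		.dummy_bytes	= 0,
		.opcode_nbits	= 1,
		.addr_nbits	= 1,
		.data_nbits	= 1,
	};
	int ret;

	if (!spi_flash_read_supported(spi))
		return -EOPNOTSUPP;	/* caller falls back to regular transfers */

	ret = spi_flash_read(spi, &msg);
	return ret ? ret : msg.retlen;
}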
/*---------------------------------------------------------------------------*/
/*
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index f5f80c5643ac..dc8eb63c6568 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,8 +99,23 @@ void process_srcu(struct work_struct *work);
}
/*
- * define and init a srcu struct at build time.
- * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique. These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function. If these rules are too
+ * restrictive, declare the srcu_struct manually. For example, in
+ * each file:
+ *
+ * static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ * init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
*/
#define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
new file mode 100644
index 000000000000..7978b3e2c1e1
--- /dev/null
+++ b/include/linux/stackdepot.h
@@ -0,0 +1,32 @@
+/*
+ * A generic stack depot implementation
+ *
+ * Author: Alexander Potapenko <glider@google.com>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on code by Dmitry Chernenkov.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_STACKDEPOT_H
+#define _LINUX_STACKDEPOT_H
+
+typedef u32 depot_stack_handle_t;
+
+struct stack_trace;
+
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
+
+void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
+
+#endif
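
A sketch of the intended round trip (assumes CONFIG_STACKTRACE; the entry count and skip value are illustrative):

#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

static depot_stack_handle_t example_record_stack(gfp_t flags)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 2,	/* skip this helper and its caller */
	};

	save_stack_trace(&trace);
	return depot_save_stack(&trace, flags);	/* deduplicated handle */
}

static void example_print_stack(depot_stack_handle_t handle)
{
	struct stack_trace trace;

	depot_fetch_stack(handle, &trace);	/* points into depot storage */
	print_stack_trace(&trace, 0);
}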
diff --git a/include/linux/stm.h b/include/linux/stm.h
index 9d0083d364e6..1a79ed8e43da 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -67,6 +67,16 @@ struct stm_device;
* description. That is, the lowest master that can be allocated to software
* writers is @sw_start and data from this writer will appear in @sw_start
* master in the STP stream.
+ *
+ * The @packet callback should adhere to the following rules:
+ * 1) it must return the number of bytes it consumed from the payload;
+ * 2) therefore, if it sent a packet that does not have payload (like FLAG),
+ * it must return zero;
+ * 3) if it does not support the requested packet type/flag combination,
+ * it must return -ENOTSUPP.
+ *
+ * The @unlink callback is called when there are no more active writers so
+ * that the master/channel can be quiesced.
*/
struct stm_data {
const char *name;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index eead8ab93c0a..4bcf5a61aada 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -90,7 +90,21 @@ struct stmmac_dma_cfg {
int pbl;
int fixed_burst;
int mixed_burst;
- int burst_len;
+ bool aal;
+};
+
+#define AXI_BLEN 7
+struct stmmac_axi {
+ bool axi_lpi_en;
+ bool axi_xit_frm;
+ u32 axi_wr_osr_lmt;
+ u32 axi_rd_osr_lmt;
+ bool axi_kbbe;
+ bool axi_axi_all;
+ u32 axi_blen[AXI_BLEN];
+ bool axi_fb;
+ bool axi_mb;
+ bool axi_rb;
};
struct plat_stmmacenet_data {
@@ -100,6 +114,7 @@ struct plat_stmmacenet_data {
int interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
+ struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
int clk_csr;
int has_gmac;
@@ -122,5 +137,6 @@ struct plat_stmmacenet_data {
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
void *bsp_priv;
+ struct stmmac_axi *axi;
};
#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index 9eebc66d957a..d3993a79a325 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -128,7 +128,13 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
extern bool sysfs_streq(const char *s1, const char *s2);
-extern int strtobool(const char *s, bool *res);
+extern int kstrtobool(const char *s, bool *res);
+static inline int strtobool(const char *s, bool *res)
+{
+ return kstrtobool(s, res);
+}
+
+int match_string(const char * const *array, size_t n, const char *string);
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
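
A sketch combining the two new helpers (the mode table and return convention are illustrative):

#include <linux/kernel.h>
#include <linux/string.h>

static const char * const example_modes[] = { "off", "auto", "forced" };

static int example_parse_mode(const char *val)
{
	bool enable;

	/* kstrtobool() accepts 1/0 and y/n; strtobool() is now a wrapper */
	if (!kstrtobool(val, &enable))
		return enable ? 2 : 0;	/* map booleans onto the table */

	/* otherwise look the string up; returns the index or -EINVAL */
	return match_string(example_modes, ARRAY_SIZE(example_modes), val);
}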
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 1ecf13e148b8..6a241a277249 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -21,10 +21,17 @@
#include <linux/utsname.h>
/*
+ * Maximum size of AUTH_NONE authentication information, in XDR words.
+ */
+#define NUL_CALLSLACK (4)
+#define NUL_REPLYSLACK (2)
+
+/*
* Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes,
* but Linux hostnames are actually limited to __NEW_UTS_LEN bytes.
*/
#define UNX_MAXNODENAME __NEW_UTS_LEN
+#define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME))
struct rpcsec_gss_info;
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 131032f15cc1..9a7ddbaf116e 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -25,6 +25,7 @@
#include <asm/signal.h>
#include <linux/path.h>
#include <net/ipv6.h>
+#include <linux/sunrpc/xprtmultipath.h>
struct rpc_inode;
@@ -67,6 +68,7 @@ struct rpc_clnt {
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *cl_debugfs; /* debugfs directory */
#endif
+ struct rpc_xprt_iter cl_xpi;
};
/*
@@ -139,7 +141,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
struct rpc_xprt *xprt);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
const struct rpc_program *, u32);
-void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *,
rpc_authflavor_t);
@@ -181,6 +182,21 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
+int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
+ int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
+ void *data);
+
+int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
+ struct rpc_xprt_switch *xps,
+ struct rpc_xprt *xprt,
+ void *dummy);
+int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *,
+ int (*setup)(struct rpc_clnt *,
+ struct rpc_xprt_switch *,
+ struct rpc_xprt *,
+ void *),
+ void *data);
+
const char *rpc_proc_name(const struct rpc_task *task);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index df02a4188487..7df625d41e35 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -36,7 +36,7 @@
*
*/
-#include <linux/crypto.h>
+#include <crypto/skcipher.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/sunrpc/gss_asn1.h>
@@ -71,10 +71,10 @@ struct gss_krb5_enctype {
const u32 keyed_cksum; /* is it a keyed cksum? */
const u32 keybytes; /* raw key len, in bytes */
const u32 keylength; /* final key len, in bytes */
- u32 (*encrypt) (struct crypto_blkcipher *tfm,
+ u32 (*encrypt) (struct crypto_skcipher *tfm,
void *iv, void *in, void *out,
int length); /* encryption function */
- u32 (*decrypt) (struct crypto_blkcipher *tfm,
+ u32 (*decrypt) (struct crypto_skcipher *tfm,
void *iv, void *in, void *out,
int length); /* decryption function */
u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
@@ -98,12 +98,12 @@ struct krb5_ctx {
u32 enctype;
u32 flags;
const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
- struct crypto_blkcipher *enc;
- struct crypto_blkcipher *seq;
- struct crypto_blkcipher *acceptor_enc;
- struct crypto_blkcipher *initiator_enc;
- struct crypto_blkcipher *acceptor_enc_aux;
- struct crypto_blkcipher *initiator_enc_aux;
+ struct crypto_skcipher *enc;
+ struct crypto_skcipher *seq;
+ struct crypto_skcipher *acceptor_enc;
+ struct crypto_skcipher *initiator_enc;
+ struct crypto_skcipher *acceptor_enc_aux;
+ struct crypto_skcipher *initiator_enc_aux;
u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
u8 cksum[GSS_KRB5_MAX_KEYLEN];
s32 endtime;
@@ -262,24 +262,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
u32
-krb5_encrypt(struct crypto_blkcipher *key,
+krb5_encrypt(struct crypto_skcipher *key,
void *iv, void *in, void *out, int length);
u32
-krb5_decrypt(struct crypto_blkcipher *key,
+krb5_decrypt(struct crypto_skcipher *key,
void *iv, void *in, void *out, int length);
int
-gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf,
+gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf,
int offset, struct page **pages);
int
-gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf,
+gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf,
int offset);
s32
krb5_make_seq_num(struct krb5_ctx *kctx,
- struct crypto_blkcipher *key,
+ struct crypto_skcipher *key,
int direction,
u32 seqnum, unsigned char *cksum, unsigned char *buf);
@@ -320,12 +320,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
- struct crypto_blkcipher *cipher,
+ struct crypto_skcipher *cipher,
unsigned char *cksum);
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
- struct crypto_blkcipher *cipher,
+ struct crypto_skcipher *cipher,
s32 seqnum);
void
gss_krb5_make_confounder(char *p, u32 conflen);
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index f33c5a4d6fe4..3b1ff38f0c37 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -93,6 +93,12 @@ struct rpcrdma_msg {
__be32 rm_pempty[3]; /* 3 empty chunk lists */
} rm_padded;
+ struct {
+ __be32 rm_err;
+ __be32 rm_vers_low;
+ __be32 rm_vers_high;
+ } rm_error;
+
__be32 rm_chunks[0]; /* read, write and reply chunks */
} rm_body;
@@ -102,17 +108,13 @@ struct rpcrdma_msg {
* Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks
*/
#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7)
+#define RPCRDMA_HDRLEN_ERR (sizeof(__be32) * 5)
enum rpcrdma_errcode {
ERR_VERS = 1,
ERR_CHUNK = 2
};
-struct rpcrdma_err_vers {
- uint32_t rdma_vers_low; /* Version range supported by peer */
- uint32_t rdma_vers_high;
-};
-
enum rpcrdma_proc {
RDMA_MSG = 0, /* An RPC call or reply msg */
RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d703f0ef37d8..05a1809c44d9 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -42,40 +42,43 @@ struct rpc_wait {
*/
struct rpc_task {
atomic_t tk_count; /* Reference count */
+ int tk_status; /* result of last operation */
struct list_head tk_task; /* global list of tasks */
- struct rpc_clnt * tk_client; /* RPC client */
- struct rpc_rqst * tk_rqstp; /* RPC request */
-
- /*
- * RPC call state
- */
- struct rpc_message tk_msg; /* RPC call info */
/*
* callback to be executed after waking up
* action next procedure for async tasks
- * tk_ops caller callbacks
*/
void (*tk_callback)(struct rpc_task *);
void (*tk_action)(struct rpc_task *);
- const struct rpc_call_ops *tk_ops;
- void * tk_calldata;
unsigned long tk_timeout; /* timeout for rpc_sleep() */
unsigned long tk_runstate; /* Task run status */
- struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
- * be any workqueue
- */
+
struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */
union {
struct work_struct tk_work; /* Async task work queue */
struct rpc_wait tk_wait; /* RPC wait */
} u;
+ /*
+ * RPC call state
+ */
+ struct rpc_message tk_msg; /* RPC call info */
+ void * tk_calldata; /* Caller private data */
+ const struct rpc_call_ops *tk_ops; /* Caller callbacks */
+
+ struct rpc_clnt * tk_client; /* RPC client */
+ struct rpc_xprt * tk_xprt; /* Transport */
+
+ struct rpc_rqst * tk_rqstp; /* RPC request */
+
+ struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
+ * be any workqueue
+ */
ktime_t tk_start; /* RPC task init timestamp */
pid_t tk_owner; /* Process id for batching tasks */
- int tk_status; /* result of last operation */
unsigned short tk_flags; /* misc flags */
unsigned short tk_timeouts; /* maj timeouts */
@@ -100,6 +103,7 @@ struct rpc_call_ops {
struct rpc_task_setup {
struct rpc_task *task;
struct rpc_clnt *rpc_client;
+ struct rpc_xprt *rpc_xprt;
const struct rpc_message *rpc_message;
const struct rpc_call_ops *callback_ops;
void *callback_data;
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 5322fea6fe4c..3081339968c3 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -75,8 +75,10 @@ struct svc_rdma_op_ctxt {
struct svc_rdma_fastreg_mr *frmr;
int hdr_count;
struct xdr_buf arg;
+ struct ib_cqe cqe;
+ struct ib_cqe reg_cqe;
+ struct ib_cqe inv_cqe;
struct list_head dto_q;
- enum ib_wr_opcode wr_op;
enum ib_wc_status wc_status;
u32 byte_len;
u32 position;
@@ -174,8 +176,6 @@ struct svcxprt_rdma {
struct work_struct sc_work;
};
/* sc_flags */
-#define RDMAXPRT_RQ_PENDING 1
-#define RDMAXPRT_SQ_PENDING 2
#define RDMAXPRT_CONN_PENDING 3
#define RPCRDMA_LISTEN_BACKLOG 10
@@ -199,7 +199,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
struct xdr_buf *rcvbuf);
/* svc_rdma_marshal.c */
-extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
+extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
struct rpcrdma_msg *,
enum rpcrdma_errcode, __be32 *);
@@ -224,16 +224,22 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
/* svc_rdma_sendto.c */
extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
- struct svc_rdma_req_map *);
+ struct svc_rdma_req_map *, bool);
extern int svc_rdma_sendto(struct svc_rqst *);
extern struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *);
+extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
+ int);
/* svc_rdma_transport.c */
+extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_write(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
-extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
- enum rpcrdma_errcode);
extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
+extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 69ef5b3ab038..fb0d212e0d3a 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -13,6 +13,7 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/ktime.h>
+#include <linux/kref.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
@@ -166,7 +167,7 @@ enum xprt_transports {
};
struct rpc_xprt {
- atomic_t count; /* Reference count */
+ struct kref kref; /* Reference count */
struct rpc_xprt_ops * ops; /* transport methods */
const struct rpc_timeout *timeout; /* timeout parms */
@@ -197,6 +198,11 @@ struct rpc_xprt {
unsigned int bind_index; /* bind function index */
/*
+ * Multipath
+ */
+ struct list_head xprt_switch;
+
+ /*
* Connection of transports
*/
unsigned long bind_timeout,
@@ -256,6 +262,7 @@ struct rpc_xprt {
struct dentry *debugfs; /* debugfs directory */
atomic_t inject_disconnect;
#endif
+ struct rcu_head rcu;
};
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -318,24 +325,13 @@ int xprt_adjust_timeout(struct rpc_rqst *req);
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_release(struct rpc_task *task);
+struct rpc_xprt * xprt_get(struct rpc_xprt *xprt);
void xprt_put(struct rpc_xprt *xprt);
struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
unsigned int num_prealloc,
unsigned int max_req);
void xprt_free(struct rpc_xprt *);
-/**
- * xprt_get - return a reference to an RPC transport.
- * @xprt: pointer to the transport
- *
- */
-static inline struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
-{
- if (atomic_inc_not_zero(&xprt->count))
- return xprt;
- return NULL;
-}
-
static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
{
return p + xprt->tsh_size;
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
new file mode 100644
index 000000000000..5a9acffa41be
--- /dev/null
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -0,0 +1,69 @@
+/*
+ * RPC client multipathing definitions
+ *
+ * Copyright (c) 2015, 2016, Primary Data, Inc. All rights reserved.
+ *
+ * Trond Myklebust <trond.myklebust@primarydata.com>
+ */
+#ifndef _NET_SUNRPC_XPRTMULTIPATH_H
+#define _NET_SUNRPC_XPRTMULTIPATH_H
+
+struct rpc_xprt_iter_ops;
+struct rpc_xprt_switch {
+ spinlock_t xps_lock;
+ struct kref xps_kref;
+
+ unsigned int xps_nxprts;
+ struct list_head xps_xprt_list;
+
+ struct net * xps_net;
+
+ const struct rpc_xprt_iter_ops *xps_iter_ops;
+
+ struct rcu_head xps_rcu;
+};
+
+struct rpc_xprt_iter {
+ struct rpc_xprt_switch __rcu *xpi_xpswitch;
+ struct rpc_xprt * xpi_cursor;
+
+ const struct rpc_xprt_iter_ops *xpi_ops;
+};
+
+
+struct rpc_xprt_iter_ops {
+ void (*xpi_rewind)(struct rpc_xprt_iter *);
+ struct rpc_xprt *(*xpi_xprt)(struct rpc_xprt_iter *);
+ struct rpc_xprt *(*xpi_next)(struct rpc_xprt_iter *);
+};
+
+extern struct rpc_xprt_switch *xprt_switch_alloc(struct rpc_xprt *xprt,
+ gfp_t gfp_flags);
+
+extern struct rpc_xprt_switch *xprt_switch_get(struct rpc_xprt_switch *xps);
+extern void xprt_switch_put(struct rpc_xprt_switch *xps);
+
+extern void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps);
+
+extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
+ struct rpc_xprt *xprt);
+extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
+ struct rpc_xprt *xprt);
+
+extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *xps);
+
+extern void xprt_iter_init_listall(struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *xps);
+
+extern void xprt_iter_destroy(struct rpc_xprt_iter *xpi);
+
+extern struct rpc_xprt_switch *xprt_iter_xchg_switch(
+ struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *newswitch);
+
+extern struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi);
+extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi);
+extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
+
+#endif
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index b7b279b54504..767190b01363 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -54,8 +54,6 @@
#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
-#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */
-
/* Memory registration strategies, by number.
* This is part of a kernel / user space API. Do not remove. */
enum rpcrdma_memreg {
diff --git a/include/linux/swait.h b/include/linux/swait.h
new file mode 100644
index 000000000000..c1f9c62a8a50
--- /dev/null
+++ b/include/linux/swait.h
@@ -0,0 +1,172 @@
+#ifndef _LINUX_SWAIT_H
+#define _LINUX_SWAIT_H
+
+#include <linux/list.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <asm/current.h>
+
+/*
+ * Simple wait queues
+ *
+ * While these are very similar to the other/complex wait queues (wait.h) the
+ * most important difference is that the simple waitqueue allows for
+ * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
+ * times.
+ *
+ * In order to make this so, we had to drop a fair number of features of the
+ * other waitqueue code; notably:
+ *
+ * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
+ * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
+ * sleeper state.
+ *
+ * - the exclusive mode; because this requires preserving the list order
+ * and this is hard.
+ *
+ * - custom wake functions; because you cannot give any guarantees about
+ * random code.
+ *
+ * As a side effect of this, the data structures are slimmer.
+ *
+ * One would recommend using this wait queue where possible.
+ */
+
+struct task_struct;
+
+struct swait_queue_head {
+ raw_spinlock_t lock;
+ struct list_head task_list;
+};
+
+struct swait_queue {
+ struct task_struct *task;
+ struct list_head task_list;
+};
+
+#define __SWAITQUEUE_INITIALIZER(name) { \
+ .task = current, \
+ .task_list = LIST_HEAD_INIT((name).task_list), \
+}
+
+#define DECLARE_SWAITQUEUE(name) \
+ struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)
+
+#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .task_list = LIST_HEAD_INIT((name).task_list), \
+}
+
+#define DECLARE_SWAIT_QUEUE_HEAD(name) \
+ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)
+
+extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
+ struct lock_class_key *key);
+
+#define init_swait_queue_head(q) \
+ do { \
+ static struct lock_class_key __key; \
+ __init_swait_queue_head((q), #q, &__key); \
+ } while (0)
+
+#ifdef CONFIG_LOCKDEP
+# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
+ ({ init_swait_queue_head(&name); name; })
+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
+ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
+#else
+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
+ DECLARE_SWAIT_QUEUE_HEAD(name)
+#endif
+
+static inline int swait_active(struct swait_queue_head *q)
+{
+ return !list_empty(&q->task_list);
+}
+
+extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_all(struct swait_queue_head *q);
+extern void swake_up_locked(struct swait_queue_head *q);
+
+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
+
+extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
+extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+#define ___swait_event(wq, condition, state, ret, cmd) \
+({ \
+ struct swait_queue __wait; \
+ long __ret = ret; \
+ \
+ INIT_LIST_HEAD(&__wait.task_list); \
+ for (;;) { \
+ long __int = prepare_to_swait_event(&wq, &__wait, state);\
+ \
+ if (condition) \
+ break; \
+ \
+ if (___wait_is_interruptible(state) && __int) { \
+ __ret = __int; \
+ break; \
+ } \
+ \
+ cmd; \
+ } \
+ finish_swait(&wq, &__wait); \
+ __ret; \
+})
+
+#define __swait_event(wq, condition) \
+ (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
+ schedule())
+
+#define swait_event(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __swait_event(wq, condition); \
+} while (0)
+
+#define __swait_event_timeout(wq, condition, timeout) \
+ ___swait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_UNINTERRUPTIBLE, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define swait_event_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __swait_event_timeout(wq, condition, timeout); \
+ __ret; \
+})
+
+#define __swait_event_interruptible(wq, condition) \
+ ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
+ schedule())
+
+#define swait_event_interruptible(wq, condition) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __ret = __swait_event_interruptible(wq, condition); \
+ __ret; \
+})
+
+#define __swait_event_interruptible_timeout(wq, condition, timeout) \
+ ___swait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define swait_event_interruptible_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __swait_event_interruptible_timeout(wq, \
+ condition, timeout); \
+ __ret; \
+})
+
+#endif /* _LINUX_SWAIT_H */
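
A usage sketch of the API above (names are illustrative; as with regular waitqueues, the condition must be updated before the wakeup):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(example_wq);
static bool example_ready;

static void example_producer(void)
{
	example_ready = true;
	swake_up(&example_wq);		/* wakes one TASK_NORMAL waiter */
}

static int example_consumer(void)
{
	/* returns 0 once example_ready is true, -ERESTARTSYS on a signal */
	return swait_event_interruptible(example_wq, example_ready);
}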
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 185815c96433..d795472c54d8 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -575,8 +575,14 @@ asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
size_t count, loff_t pos);
asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
+asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
+ int flags);
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
+asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
+ int flags);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
asmlinkage long sys_chdir(const char __user *filename);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b386361ba3e8..7be9b1242354 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
return (struct tcphdr *)skb_transport_header(skb);
}
+static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
+{
+ return th->doff * 4;
+}
+
static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
- return tcp_hdr(skb)->doff * 4;
+ return __tcp_hdrlen(tcp_hdr(skb));
}
static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
@@ -153,6 +158,9 @@ struct tcp_sock {
u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
* total number of segments in.
*/
+ u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
+ * total number of data segments in.
+ */
u32 rcv_nxt; /* What we want to receive next */
u32 copied_seq; /* Head of yet unread data */
u32 rcv_wup; /* rcv_nxt on last window update sent */
@@ -160,6 +168,9 @@ struct tcp_sock {
u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
* The total number of segments sent.
*/
+ u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
+ * total number of data segments sent.
+ */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
@@ -256,6 +267,7 @@ struct tcp_sock {
u32 prr_delivered; /* Number of newly delivered packets to
* receiver in Recovery. */
u32 prr_out; /* Total number of pkts sent during Recovery. */
+ u32 delivered; /* Total data packets delivered incl. rexmits */
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e13a1ace50e9..a55d0523f75d 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -156,6 +156,7 @@ struct thermal_attr {
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
* @devdata: private pointer for device private data
* @trips: number of trip points the thermal zone supports
+ * @trips_disabled: bitmap for disabled trips
* @passive_delay: number of milliseconds to wait between polls when
* performing passive cooling.
* @polling_delay: number of milliseconds to wait between polls when
@@ -191,6 +192,7 @@ struct thermal_zone_device {
struct thermal_attr *trip_hyst_attrs;
void *devdata;
int trips;
+ unsigned long trips_disabled; /* bitmap for disabled trips */
int passive_delay;
int polling_delay;
int temperature;
@@ -362,6 +364,11 @@ thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
const struct thermal_zone_of_device_ops *ops);
void thermal_zone_of_sensor_unregister(struct device *dev,
struct thermal_zone_device *tz);
+struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
+ struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops);
+void devm_thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tz);
#else
static inline struct thermal_zone_device *
thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
@@ -376,6 +383,19 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
{
}
+static inline struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
+ struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline
+void devm_thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tz)
+{
+}
+
#endif
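
A sketch of a sensor driver adopting the new devm_ variant so the zone is torn down automatically on unbind (the ops table and fixed temperature are illustrative):

#include <linux/platform_device.h>
#include <linux/thermal.h>

static int example_get_temp(void *data, int *temp)
{
	*temp = 42000;			/* millidegrees Celsius */
	return 0;
}

static const struct thermal_zone_of_device_ops example_ops = {
	.get_temp = example_get_temp,
};

static int example_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *tz;

	tz = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, NULL,
						  &example_ops);
	return PTR_ERR_OR_ZERO(tz);
}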
#if IS_ENABLED(CONFIG_THERMAL)
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 97fd4e543846..62be0786d6d0 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -97,8 +97,21 @@ static inline void tick_broadcast_exit(void)
tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}
+enum tick_dep_bits {
+ TICK_DEP_BIT_POSIX_TIMER = 0,
+ TICK_DEP_BIT_PERF_EVENTS = 1,
+ TICK_DEP_BIT_SCHED = 2,
+ TICK_DEP_BIT_CLOCK_UNSTABLE = 3
+};
+
+#define TICK_DEP_MASK_NONE 0
+#define TICK_DEP_MASK_POSIX_TIMER (1 << TICK_DEP_BIT_POSIX_TIMER)
+#define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS)
+#define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED)
+#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
+
#ifdef CONFIG_NO_HZ_COMMON
-extern int tick_nohz_enabled;
+extern bool tick_nohz_enabled;
extern int tick_nohz_tick_stopped(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
@@ -154,9 +167,73 @@ static inline int housekeeping_any_cpu(void)
return cpumask_any_and(housekeeping_mask, cpu_online_mask);
}
-extern void tick_nohz_full_kick(void);
+extern void tick_nohz_dep_set(enum tick_dep_bits bit);
+extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
+extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
+extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
+extern void tick_nohz_dep_set_task(struct task_struct *tsk,
+ enum tick_dep_bits bit);
+extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
+ enum tick_dep_bits bit);
+extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit);
+extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit);
+
+/*
+ * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
+ * on top of static keys.
+ */
+static inline void tick_dep_set(enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_set(bit);
+}
+
+static inline void tick_dep_clear(enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_clear(bit);
+}
+
+static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_cpu(cpu))
+ tick_nohz_dep_set_cpu(cpu, bit);
+}
+
+static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_cpu(cpu))
+ tick_nohz_dep_clear_cpu(cpu, bit);
+}
+
+static inline void tick_dep_set_task(struct task_struct *tsk,
+ enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_set_task(tsk, bit);
+}
+static inline void tick_dep_clear_task(struct task_struct *tsk,
+ enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_clear_task(tsk, bit);
+}
+static inline void tick_dep_set_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_set_signal(signal, bit);
+}
+static inline void tick_dep_clear_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_clear_signal(signal, bit);
+}
+
extern void tick_nohz_full_kick_cpu(int cpu);
-extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(void);
#else
static inline int housekeeping_any_cpu(void)
@@ -166,9 +243,21 @@ static inline int housekeeping_any_cpu(void)
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
+
+static inline void tick_dep_set(enum tick_dep_bits bit) { }
+static inline void tick_dep_clear(enum tick_dep_bits bit) { }
+static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline void tick_dep_set_task(struct task_struct *tsk,
+ enum tick_dep_bits bit) { }
+static inline void tick_dep_clear_task(struct task_struct *tsk,
+ enum tick_dep_bits bit) { }
+static inline void tick_dep_set_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit) { }
+static inline void tick_dep_clear_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit) { }
+
static inline void tick_nohz_full_kick_cpu(int cpu) { }
-static inline void tick_nohz_full_kick(void) { }
-static inline void tick_nohz_full_kick_all(void) { }
static inline void __tick_nohz_task_switch(void) { }
#endif
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 25247220b4b7..e88005459035 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -50,6 +50,7 @@ struct tk_read_base {
* @offs_tai: Offset clock monotonic -> clock tai
* @tai_offset: The current UTC to TAI offset in seconds
* @clock_was_set_seq: The sequence number of clock was set events
+ * @cs_was_changed_seq: The sequence number of clocksource change events
* @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
* @raw_time: Monotonic raw base time in timespec64 format
* @cycle_interval: Number of clock cycles in one NTP interval
@@ -91,6 +92,7 @@ struct timekeeper {
ktime_t offs_tai;
s32 tai_offset;
unsigned int clock_was_set_seq;
+ u8 cs_was_changed_seq;
ktime_t next_leap_ktime;
struct timespec64 raw_time;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index ec89d846324c..96f37bee3bc1 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -267,6 +267,64 @@ extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
struct timespec64 *ts_real);
/*
+ * struct system_time_snapshot - simultaneous raw/real time capture with
+ * counter value
+ * @cycles: Clocksource counter value to produce the system times
+ * @real: Realtime system time
+ * @raw: Monotonic raw system time
+ * @clock_was_set_seq: The sequence number of clock was set events
+ * @cs_was_changed_seq: The sequence number of clocksource change events
+ */
+struct system_time_snapshot {
+ cycle_t cycles;
+ ktime_t real;
+ ktime_t raw;
+ unsigned int clock_was_set_seq;
+ u8 cs_was_changed_seq;
+};
+
+/*
+ * struct system_device_crosststamp - system/device cross-timestamp
+ * (syncronized capture)
+ * @device: Device time
+ * @sys_realtime: Realtime simultaneous with device time
+ * @sys_monoraw: Monotonic raw simultaneous with device time
+ */
+struct system_device_crosststamp {
+ ktime_t device;
+ ktime_t sys_realtime;
+ ktime_t sys_monoraw;
+};
+
+/*
+ * struct system_counterval_t - system counter value with the pointer to the
+ * corresponding clocksource
+ * @cycles: System counter value
+ * @cs: Clocksource corresponding to system counter value. Used by
+ * timekeeping code to verify comparability of two cycle values
+ */
+struct system_counterval_t {
+ cycle_t cycles;
+ struct clocksource *cs;
+};
+
+/*
+ * Get cross timestamp between system clock and device clock
+ */
+extern int get_device_system_crosststamp(
+ int (*get_time_fn)(ktime_t *device_time,
+ struct system_counterval_t *system_counterval,
+ void *ctx),
+ void *ctx,
+ struct system_time_snapshot *history,
+ struct system_device_crosststamp *xtstamp);
+
+/*
+ * Simultaneously snapshot realtime and monotonic raw clocks
+ */
+extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
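
A hedged sketch of the new cross-timestamp API; the callback body is a placeholder, since a real driver must read its device clock and the correlated system counter together.

#include <linux/errno.h>
#include <linux/timekeeping.h>

static int example_get_time(ktime_t *device_time,
			    struct system_counterval_t *system_counterval,
			    void *ctx)
{
	/* read the device clock and the matching system counter value here */
	return -ENODEV;			/* placeholder */
}

static int example_crosststamp(struct system_device_crosststamp *xt)
{
	/* history may be NULL when no historical snapshot is available */
	return get_device_system_crosststamp(example_get_time, NULL, NULL, xt);
}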
+
+/*
* Persistent clock related interfaces
*/
extern int persistent_clock_is_local;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 429fdfc3baf5..0810f81b6db2 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -15,16 +15,6 @@ struct tracer;
struct dentry;
struct bpf_prog;
-struct trace_print_flags {
- unsigned long mask;
- const char *name;
-};
-
-struct trace_print_flags_u64 {
- unsigned long long mask;
- const char *name;
-};
-
const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
unsigned long flags,
const struct trace_print_flags *flag_array);
@@ -430,7 +420,8 @@ extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
- enum event_trigger_type tt);
+ enum event_trigger_type tt,
+ void *rec);
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
@@ -517,7 +508,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
if (tt)
- event_triggers_post_call(file, tt);
+ event_triggers_post_call(file, tt, entry);
}
/**
@@ -550,7 +541,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
irq_flags, pc, regs);
if (tt)
- event_triggers_post_call(file, tt);
+ event_triggers_post_call(file, tt, entry);
}
#ifdef CONFIG_BPF_EVENTS
@@ -568,6 +559,8 @@ enum {
FILTER_DYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
+ FILTER_COMM,
+ FILTER_CPU,
};
extern int trace_event_raw_init(struct trace_event_call *call);
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index e1ee97c713bf..4ac89acb6136 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -3,13 +3,23 @@
/*
* File can be included directly by headers who only want to access
- * tracepoint->key to guard out of line trace calls. Otherwise
- * linux/tracepoint.h should be used.
+ * tracepoint->key to guard out of line trace calls, or the definition of
+ * trace_print_flags{_u64}. Otherwise linux/tracepoint.h should be used.
*/
#include <linux/atomic.h>
#include <linux/static_key.h>
+struct trace_print_flags {
+ unsigned long mask;
+ const char *name;
+};
+
+struct trace_print_flags_u64 {
+ unsigned long long mask;
+ const char *name;
+};
+
struct tracepoint_func {
void *func;
void *data;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index acfdbf353a0b..be586c632a0c 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -134,9 +134,6 @@ extern void syscall_unregfunc(void);
void *it_func; \
void *__data; \
\
- if (!cpu_online(raw_smp_processor_id())) \
- return; \
- \
if (!(cond)) \
return; \
prercu; \
@@ -343,15 +340,19 @@ extern void syscall_unregfunc(void);
* "void *__data, proto" as the callback prototype.
*/
#define DECLARE_TRACE_NOARGS(name) \
- __DECLARE_TRACE(name, void, , 1, void *__data, __data)
+ __DECLARE_TRACE(name, void, , \
+ cpu_online(raw_smp_processor_id()), \
+ void *__data, __data)
#define DECLARE_TRACE(name, proto, args) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \
- PARAMS(void *__data, proto), \
- PARAMS(__data, args))
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()), \
+ PARAMS(void *__data, proto), \
+ PARAMS(__data, args))
#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
PARAMS(void *__data, proto), \
PARAMS(__data, args))
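The practical effect is that the online-CPU check is now folded into every tracepoint's condition. As a rough, hypothetical illustration (the tracepoint name and condition are made up):

/* Sketch: callbacks for this tracepoint now run only when the declared
 * condition holds and the current CPU is online.
 */
DECLARE_TRACE_CONDITION(my_event,
	TP_PROTO(int val),
	TP_ARGS(val),
	TP_CONDITION(val > 0));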
diff --git a/include/linux/tty.h b/include/linux/tty.h
index d9fb4b043f56..3b09f235db66 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -302,6 +302,7 @@ struct tty_struct {
struct work_struct hangup_work;
void *disc_data;
void *driver_data;
+ spinlock_t files_lock; /* protects tty_files list */
struct list_head tty_files;
#define N_TTY_BUF_SIZE 4096
@@ -336,7 +337,6 @@ struct tty_file_private {
#define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */
#define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */
#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
-#define TTY_DEBUG 4 /* Debugging */
#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
#define TTY_LDISC_OPEN 11 /* Line discipline is open */
@@ -433,8 +433,6 @@ extern struct device *tty_register_device_attr(struct tty_driver *driver,
void *drvdata,
const struct attribute_group **attr_grp);
extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
-extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
- int buflen);
extern void tty_write_message(struct tty_struct *tty, char *msg);
extern int tty_send_xchar(struct tty_struct *tty, char ch);
extern int tty_put_char(struct tty_struct *tty, unsigned char c);
@@ -446,12 +444,7 @@ extern void tty_unthrottle(struct tty_struct *tty);
extern int tty_throttle_safe(struct tty_struct *tty);
extern int tty_unthrottle_safe(struct tty_struct *tty);
extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
-extern void tty_driver_remove_tty(struct tty_driver *driver,
- struct tty_struct *tty);
-extern void tty_free_termios(struct tty_struct *tty);
extern int is_current_pgrp_orphaned(void);
-extern int is_ignored(int sig);
-extern int tty_signal(int sig, struct tty_struct *tty);
extern void tty_hangup(struct tty_struct *tty);
extern void tty_vhangup(struct tty_struct *tty);
extern int tty_hung_up_p(struct file *filp);
@@ -493,7 +486,8 @@ extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
extern void tty_ldisc_deref(struct tty_ldisc *);
extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
-extern void tty_ldisc_hangup(struct tty_struct *tty);
+extern void tty_ldisc_hangup(struct tty_struct *tty, bool reset);
+extern int tty_ldisc_reinit(struct tty_struct *tty, int disc);
extern const struct file_operations tty_ldiscs_proc_fops;
extern void tty_wakeup(struct tty_struct *tty);
@@ -508,16 +502,13 @@ extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx);
extern int tty_alloc_file(struct file *file);
extern void tty_add_file(struct tty_struct *tty, struct file *file);
extern void tty_free_file(struct file *file);
-extern void free_tty_struct(struct tty_struct *tty);
-extern void deinitialize_tty_struct(struct tty_struct *tty);
extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
extern int tty_release(struct inode *inode, struct file *filp);
-extern int tty_init_termios(struct tty_struct *tty);
+extern void tty_init_termios(struct tty_struct *tty);
extern int tty_standard_install(struct tty_driver *driver,
struct tty_struct *tty);
extern struct mutex tty_mutex;
-extern spinlock_t tty_files_lock;
#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock))
@@ -575,43 +566,29 @@ static inline int tty_port_users(struct tty_port *port)
extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
extern int tty_unregister_ldisc(int disc);
-extern int tty_set_ldisc(struct tty_struct *tty, int ldisc);
+extern int tty_set_ldisc(struct tty_struct *tty, int disc);
extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
extern void tty_ldisc_release(struct tty_struct *tty);
extern void tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
-extern void tty_ldisc_begin(void);
-
-static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
- char *f, int count)
-{
- if (ld->ops->receive_buf2)
- count = ld->ops->receive_buf2(ld->tty, p, f, count);
- else {
- count = min_t(int, count, ld->tty->receive_room);
- if (count)
- ld->ops->receive_buf(ld->tty, p, f, count);
- }
- return count;
-}
-
+extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+ char *f, int count);
/* n_tty.c */
-extern struct tty_ldisc_ops tty_ldisc_N_TTY;
extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+extern void __init n_tty_init(void);
/* tty_audit.c */
#ifdef CONFIG_AUDIT
extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
- size_t size, unsigned icanon);
+ size_t size);
extern void tty_audit_exit(void);
extern void tty_audit_fork(struct signal_struct *sig);
extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
-extern void tty_audit_push(struct tty_struct *tty);
-extern int tty_audit_push_current(void);
+extern int tty_audit_push(void);
#else
static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
- size_t size, unsigned icanon)
+ size_t size)
{
}
static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
@@ -623,10 +600,7 @@ static inline void tty_audit_exit(void)
static inline void tty_audit_fork(struct signal_struct *sig)
{
}
-static inline void tty_audit_push(struct tty_struct *tty)
-{
-}
-static inline int tty_audit_push_current(void)
+static inline int tty_audit_push(void)
{
return 0;
}
@@ -648,11 +622,11 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
/* tty_mutex.c */
/* functions for preparation of BKL removal */
-extern void __lockfunc tty_lock(struct tty_struct *tty);
+extern void tty_lock(struct tty_struct *tty);
extern int tty_lock_interruptible(struct tty_struct *tty);
-extern void __lockfunc tty_unlock(struct tty_struct *tty);
-extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
-extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
+extern void tty_unlock(struct tty_struct *tty);
+extern void tty_lock_slave(struct tty_struct *tty);
+extern void tty_unlock_slave(struct tty_struct *tty);
extern void tty_set_lock_subclass(struct tty_struct *tty);
#ifdef CONFIG_PROC_FS
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 00c9d688d7b7..3971cf0eb467 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -25,12 +25,6 @@
* buffers of any input characters it may have queued to be
* delivered to the user mode process.
*
- * ssize_t (*chars_in_buffer)(struct tty_struct *tty);
- *
- * This function returns the number of input characters the line
- * discipline may have queued up to be delivered to the user mode
- * process.
- *
* ssize_t (*read)(struct tty_struct * tty, struct file * file,
* unsigned char * buf, size_t nr);
*
@@ -104,11 +98,6 @@
* seek to perform this action quickly but should wait until
* any pending driver I/O is completed.
*
- * void (*fasync)(struct tty_struct *, int on)
- *
- * Notify line discipline when signal-driven I/O is enabled or
- * disabled.
- *
* void (*dcd_change)(struct tty_struct *tty, unsigned int status)
*
* Tells the discipline that the DCD pin has changed its status.
@@ -188,7 +177,6 @@ struct tty_ldisc_ops {
int (*open)(struct tty_struct *);
void (*close)(struct tty_struct *);
void (*flush_buffer)(struct tty_struct *tty);
- ssize_t (*chars_in_buffer)(struct tty_struct *tty);
ssize_t (*read)(struct tty_struct *tty, struct file *file,
unsigned char __user *buf, size_t nr);
ssize_t (*write)(struct tty_struct *tty, struct file *file,
@@ -209,7 +197,6 @@ struct tty_ldisc_ops {
char *fp, int count);
void (*write_wakeup)(struct tty_struct *);
void (*dcd_change)(struct tty_struct *, unsigned int);
- void (*fasync)(struct tty_struct *tty, int on);
int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
char *fp, int count);
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
index 99c1b4d20b0f..33383ca23837 100644
--- a/include/linux/unaligned/access_ok.h
+++ b/include/linux/unaligned/access_ok.h
@@ -4,62 +4,62 @@
#include <linux/kernel.h>
#include <asm/byteorder.h>
-static inline u16 get_unaligned_le16(const void *p)
+static __always_inline u16 get_unaligned_le16(const void *p)
{
return le16_to_cpup((__le16 *)p);
}
-static inline u32 get_unaligned_le32(const void *p)
+static __always_inline u32 get_unaligned_le32(const void *p)
{
return le32_to_cpup((__le32 *)p);
}
-static inline u64 get_unaligned_le64(const void *p)
+static __always_inline u64 get_unaligned_le64(const void *p)
{
return le64_to_cpup((__le64 *)p);
}
-static inline u16 get_unaligned_be16(const void *p)
+static __always_inline u16 get_unaligned_be16(const void *p)
{
return be16_to_cpup((__be16 *)p);
}
-static inline u32 get_unaligned_be32(const void *p)
+static __always_inline u32 get_unaligned_be32(const void *p)
{
return be32_to_cpup((__be32 *)p);
}
-static inline u64 get_unaligned_be64(const void *p)
+static __always_inline u64 get_unaligned_be64(const void *p)
{
return be64_to_cpup((__be64 *)p);
}
-static inline void put_unaligned_le16(u16 val, void *p)
+static __always_inline void put_unaligned_le16(u16 val, void *p)
{
*((__le16 *)p) = cpu_to_le16(val);
}
-static inline void put_unaligned_le32(u32 val, void *p)
+static __always_inline void put_unaligned_le32(u32 val, void *p)
{
*((__le32 *)p) = cpu_to_le32(val);
}
-static inline void put_unaligned_le64(u64 val, void *p)
+static __always_inline void put_unaligned_le64(u64 val, void *p)
{
*((__le64 *)p) = cpu_to_le64(val);
}
-static inline void put_unaligned_be16(u16 val, void *p)
+static __always_inline void put_unaligned_be16(u16 val, void *p)
{
*((__be16 *)p) = cpu_to_be16(val);
}
-static inline void put_unaligned_be32(u32 val, void *p)
+static __always_inline void put_unaligned_be32(u32 val, void *p)
{
*((__be32 *)p) = cpu_to_be32(val);
}
-static inline void put_unaligned_be64(u64 val, void *p)
+static __always_inline void put_unaligned_be64(u64 val, void *p)
{
*((__be64 *)p) = cpu_to_be64(val);
}
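A minimal usage sketch of these accessors, assuming a made-up little-endian wire header:

/* Sketch: parse fields from a possibly unaligned buffer. */
struct my_hdr {			/* hypothetical wire format */
	u16 type;
	u32 len;
};

static void my_parse_hdr(const u8 *buf, struct my_hdr *hdr)
{
	hdr->type = get_unaligned_le16(buf);
	hdr->len  = get_unaligned_le32(buf + 2);
}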
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 89533ba38691..6a9a0c28415d 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -50,6 +50,7 @@ struct ep_device;
* struct usb_host_endpoint - host-side endpoint descriptor and queue
* @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
* @ss_ep_comp: SuperSpeed companion descriptor for this endpoint
+ * @ssp_isoc_ep_comp: SuperSpeedPlus isoc companion descriptor for this endpoint
* @urb_list: urbs queued to this endpoint; maintained by usbcore
* @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
* with one or more transfer descriptors (TDs) per urb
@@ -65,6 +66,7 @@ struct ep_device;
struct usb_host_endpoint {
struct usb_endpoint_descriptor desc;
struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp;
struct list_head urb_list;
void *hcpriv;
struct ep_device *ep_dev; /* For sysfs info */
@@ -330,6 +332,7 @@ struct usb_host_bos {
struct usb_ss_cap_descriptor *ss_cap;
struct usb_ssp_cap_descriptor *ssp_cap;
struct usb_ss_container_id_descriptor *ss_id;
+ struct usb_ptm_cap_descriptor *ptm_cap;
};
int __usb_get_extra_descriptor(char *buffer, unsigned size,
@@ -375,7 +378,6 @@ struct usb_bus {
struct usb_devmap devmap; /* device address allocation map */
struct usb_device *root_hub; /* Root hub */
struct usb_bus *hs_companion; /* Companion EHCI bus, if any */
- struct list_head bus_list; /* list of busses */
struct mutex usb_address0_mutex; /* unaddressed device mutex */
@@ -642,9 +644,10 @@ extern struct usb_device *usb_hub_find_child(struct usb_device *hdev,
if (!child) continue; else
/* USB device locking */
-#define usb_lock_device(udev) device_lock(&(udev)->dev)
-#define usb_unlock_device(udev) device_unlock(&(udev)->dev)
-#define usb_trylock_device(udev) device_trylock(&(udev)->dev)
+#define usb_lock_device(udev) device_lock(&(udev)->dev)
+#define usb_unlock_device(udev) device_unlock(&(udev)->dev)
+#define usb_lock_device_interruptible(udev) device_lock_interruptible(&(udev)->dev)
+#define usb_trylock_device(udev) device_trylock(&(udev)->dev)
extern int usb_lock_device_for_reset(struct usb_device *udev,
const struct usb_interface *iface);
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 1074b8921a5d..2b81b24eb5aa 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -126,6 +126,10 @@ struct usb_os_desc_table {
* string identifiers assigned during @bind(). If this
* pointer is null after initiation, the function will not
* be available at super speed.
+ * @ssp_descriptors: Table of super speed plus descriptors, using
+ * interface and string identifiers assigned during @bind(). If
+ * this pointer is null after initiation, the function will not
+ * be available at super speed plus.
* @config: assigned when @usb_add_function() is called; this is the
* configuration with which this function is associated.
* @os_desc_table: Table of (interface id, os descriptors) pairs. The function
@@ -186,6 +190,7 @@ struct usb_function {
struct usb_descriptor_header **fs_descriptors;
struct usb_descriptor_header **hs_descriptors;
struct usb_descriptor_header **ss_descriptors;
+ struct usb_descriptor_header **ssp_descriptors;
struct usb_configuration *config;
@@ -317,6 +322,7 @@ struct usb_configuration {
unsigned superspeed:1;
unsigned highspeed:1;
unsigned fullspeed:1;
+ unsigned superspeed_plus:1;
struct usb_function *interface[MAX_CONFIG_INTERFACES];
};
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index d82d0068872b..5d4e151c49bf 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -595,6 +595,10 @@ struct usb_gadget_ops {
* only supports HNP on a different root port.
* @b_hnp_enable: OTG device feature flag, indicating that the A-Host
* enabled HNP support.
+ * @hnp_polling_support: OTG device feature flag, indicating if the OTG device
+ * in peripheral mode can support HNP polling.
+ * @host_request_flag: OTG device feature flag, indicating if A-Peripheral
+ * or B-Peripheral wants to take host role.
* @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
* MaxPacketSize.
* @is_selfpowered: if the gadget is self-powered.
@@ -642,6 +646,8 @@ struct usb_gadget {
unsigned b_hnp_enable:1;
unsigned a_hnp_support:1;
unsigned a_alt_hnp_support:1;
+ unsigned hnp_polling_support:1;
+ unsigned host_request_flag:1;
unsigned quirk_ep_out_aligned_size:1;
unsigned quirk_altset_not_supp:1;
unsigned quirk_stall_not_supp:1;
@@ -729,6 +735,16 @@ static inline int gadget_is_superspeed(struct usb_gadget *g)
}
/**
+ * gadget_is_superspeed_plus() - return true if the hardware handles
+ * superspeed plus
+ * @g: controller that might support superspeed plus
+ */
+static inline int gadget_is_superspeed_plus(struct usb_gadget *g)
+{
+ return g->max_speed >= USB_SPEED_SUPER_PLUS;
+}
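As a brief, hypothetical sketch of how a function driver might combine the new speed check with the new ssp_descriptors table (my_use() is a placeholder):

/* Sketch: pick a descriptor set depending on the connected speed. */
static void my_choose_descs(struct usb_function *f, struct usb_gadget *g)
{
	if (gadget_is_superspeed_plus(g) && f->ssp_descriptors)
		my_use(f->ssp_descriptors);
	else if (gadget_is_superspeed(g))
		my_use(f->ss_descriptors);
}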
+
+/**
* gadget_is_otg - return true iff the hardware is OTG-ready
* @g: controller that might have a Mini-AB connector
*
@@ -1126,6 +1142,7 @@ extern int usb_add_gadget_udc_release(struct device *parent,
struct usb_gadget *gadget, void (*release)(struct device *dev));
extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
extern void usb_del_gadget_udc(struct usb_gadget *gadget);
+extern char *usb_get_gadget_udc_name(void);
/*-------------------------------------------------------------------------*/
@@ -1194,7 +1211,8 @@ struct usb_function;
int usb_assign_descriptors(struct usb_function *f,
struct usb_descriptor_header **fs,
struct usb_descriptor_header **hs,
- struct usb_descriptor_header **ss);
+ struct usb_descriptor_header **ss,
+ struct usb_descriptor_header **ssp);
void usb_free_all_descriptors(struct usb_function *f);
struct usb_descriptor_header *usb_otg_descriptor_alloc(
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 4dcf8446dbcd..b98f831dcda3 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -23,6 +23,7 @@
#include <linux/rwsem.h>
#include <linux/interrupt.h>
+#include <linux/idr.h>
#define MAX_TOPO_LEVEL 6
@@ -630,8 +631,8 @@ extern void usb_set_device_state(struct usb_device *udev,
/* exported only within usbcore */
-extern struct list_head usb_bus_list;
-extern struct mutex usb_bus_list_lock;
+extern struct idr usb_bus_idr;
+extern struct mutex usb_bus_idr_lock;
extern wait_queue_head_t usb_kill_urb_queue;
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index e159b39f67a2..974c3796a23f 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -22,6 +22,7 @@
#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0)
+#define ULPI_TX_PKT_EN_CLR_FIX BIT(19)
#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 96ddfb7ab018..0b3da40a525e 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -124,7 +124,7 @@ struct musb_hdrc_platform_data {
int (*set_power)(int state);
/* MUSB configuration-specific details */
- struct musb_hdrc_config *config;
+ const struct musb_hdrc_config *config;
/* Architecture specific board data */
void *board_data;
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 974bce93aa28..de3237fce6b2 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -16,6 +16,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np);
bool of_usb_host_tpl_support(struct device_node *np);
int of_usb_update_otg_caps(struct device_node *np,
struct usb_otg_caps *otg_caps);
+struct device_node *usb_of_get_child_node(struct device_node *parent,
+ int portnum);
#else
static inline enum usb_dr_mode
of_usb_get_dr_mode_by_phy(struct device_node *phy_np)
@@ -31,6 +33,11 @@ static inline int of_usb_update_otg_caps(struct device_node *np,
{
return 0;
}
+static inline struct device_node *usb_of_get_child_node
+ (struct device_node *parent, int portnum)
+{
+ return NULL;
+}
#endif
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index f728f1854829..24198e16f849 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -40,6 +40,18 @@
#define PROTO_HOST (1)
#define PROTO_GADGET (2)
+#define OTG_STS_SELECTOR 0xF000 /* OTG status selector, according to
+ * OTG and EH 2.0 Chapter 6.2.3
+ * Table:6-4
+ */
+
+#define HOST_REQUEST_FLAG 1 /* Host request flag, according to
+ * OTG and EH 2.0 Chapter 6.2.3
+ * Table:6-5
+ */
+
+#define T_HOST_REQ_POLL (1500) /* 1500ms, HNP polling interval */
+
enum otg_fsm_timer {
/* Standard OTG timers */
A_WAIT_VRISE,
@@ -48,6 +60,7 @@ enum otg_fsm_timer {
A_AIDL_BDIS,
B_ASE0_BRST,
A_BIDL_ADIS,
+ B_AIDL_BDIS,
/* Auxiliary timers */
B_SE0_SRP,
@@ -119,6 +132,8 @@ struct otg_fsm {
/* Current usb protocol used: 0:undefine; 1:host; 2:client */
int protocol;
struct mutex lock;
+ u8 *host_req_flag;
+ struct delayed_work hnp_polling_work;
};
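For illustration only, the polling machinery these additions enable is typically driven along the following lines; my_get_otg_status() and my_start_hnp() are hypothetical helpers, and the real scheduling lives in the OTG FSM core:

/* Sketch: poll the peripheral's host-request flag every T_HOST_REQ_POLL ms
 * (via GetStatus with the OTG status selector) and hand over the host role
 * once HOST_REQUEST_FLAG is reported.
 */
static void my_hnp_polling_work(struct work_struct *work)
{
	struct otg_fsm *fsm = container_of(to_delayed_work(work),
					   struct otg_fsm, hnp_polling_work);

	if (my_get_otg_status(fsm) & HOST_REQUEST_FLAG)
		my_start_hnp(fsm);
	else
		schedule_delayed_work(&fsm->hnp_polling_work,
				      msecs_to_jiffies(T_HOST_REQ_POLL));
}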
struct otg_fsm_ops {
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 4db191fe8c2c..00a47d058d83 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -184,6 +184,7 @@ struct renesas_usbhs_driver_param {
};
#define USBHS_TYPE_RCAR_GEN2 1
+#define USBHS_TYPE_RCAR_GEN3 2
/*
* option:
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h
index cb33fff2ba0b..305ee8db7faf 100644
--- a/include/linux/usb/storage.h
+++ b/include/linux/usb/storage.h
@@ -45,9 +45,9 @@
#define USB_PR_DEVICE 0xff /* Use device's value */
- /*
- * Bulk only data structures
- */
+/*
+ * Bulk only data structures
+ */
/* command block wrapper */
struct bulk_cb_wrap {
@@ -56,18 +56,18 @@ struct bulk_cb_wrap {
__le32 DataTransferLength; /* size of data */
__u8 Flags; /* direction in bit 0 */
__u8 Lun; /* LUN normally 0 */
- __u8 Length; /* of of the CDB */
+ __u8 Length; /* length of the CDB */
__u8 CDB[16]; /* max command */
};
#define US_BULK_CB_WRAP_LEN 31
-#define US_BULK_CB_SIGN 0x43425355 /*spells out USBC */
+#define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */
#define US_BULK_FLAG_IN (1 << 7)
#define US_BULK_FLAG_OUT 0
/* command status wrapper */
struct bulk_cs_wrap {
- __le32 Signature; /* should = 'USBS' */
+ __le32 Signature; /* contains 'USBS' */
__u32 Tag; /* same as original command */
__le32 Residue; /* amount not transferred */
__u8 Status; /* see below */
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 610a86a892b8..0ecae0b1cd34 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -92,6 +92,17 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group);
extern long vfio_external_check_extension(struct vfio_group *group,
unsigned long arg);
+/*
+ * Sub-module helpers
+ */
+struct vfio_info_cap {
+ struct vfio_info_cap_header *buf;
+ size_t size;
+};
+extern struct vfio_info_cap_header *vfio_info_cap_add(
+ struct vfio_info_cap *caps, size_t size, u16 id, u16 version);
+extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
+
struct pci_dev;
#ifdef CONFIG_EEH
extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
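A hedged sketch of how an IOMMU backend might use the new capability helpers; MY_CAP_ID and struct my_cap are placeholders, and the sketch assumes vfio_info_cap_add() returns an ERR_PTR on allocation failure:

/* Sketch: append a capability to the chain reported to userspace. */
static int my_add_cap(struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header *header;

	header = vfio_info_cap_add(caps, sizeof(struct my_cap), MY_CAP_ID, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	/* fill in the capability fields that follow the header here */
	return 0;
}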
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 69e1d4a1f1b3..b39a5f3153bd 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -36,6 +36,26 @@
struct pci_dev;
/**
+ * enum vga_switcheroo_handler_flags_t - handler flags bitmask
+ * @VGA_SWITCHEROO_CAN_SWITCH_DDC: whether the handler is able to switch the
+ * DDC lines separately. This signals to clients that they should call
+ * drm_get_edid_switcheroo() to probe the EDID
+ * @VGA_SWITCHEROO_NEEDS_EDP_CONFIG: whether the handler is unable to switch
+ * the AUX channel separately. This signals to clients that the active
+ * GPU needs to train the link and communicate the link parameters to the
+ * inactive GPU (mediated by vga_switcheroo). The inactive GPU may then
+ * skip the AUX handshake and set up its output with these pre-calibrated
+ * values (DisplayPort specification v1.1a, section 2.5.3.3)
+ *
+ * Handler flags bitmask. Used by handlers to declare their capabilities upon
+ * registering with vga_switcheroo.
+ */
+enum vga_switcheroo_handler_flags_t {
+ VGA_SWITCHEROO_CAN_SWITCH_DDC = (1 << 0),
+ VGA_SWITCHEROO_NEEDS_EDP_CONFIG = (1 << 1),
+};
+
+/**
* enum vga_switcheroo_state - client power state
* @VGA_SWITCHEROO_OFF: off
* @VGA_SWITCHEROO_ON: on
@@ -82,6 +102,9 @@ enum vga_switcheroo_client_id {
* Mandatory. For muxless machines this should be a no-op. Returning 0
* denotes success, anything else failure (in which case the switch is
* aborted)
+ * @switch_ddc: switch DDC lines to given client.
+ * Optional. Should return the previous DDC owner on success or a
+ * negative int on failure
* @power_state: cut or reinstate power of given client.
* Optional. The return value is ignored
* @get_client_id: determine if given pci device is integrated or discrete GPU.
@@ -93,6 +116,7 @@ enum vga_switcheroo_client_id {
struct vga_switcheroo_handler {
int (*init)(void);
int (*switchto)(enum vga_switcheroo_client_id id);
+ int (*switch_ddc)(enum vga_switcheroo_client_id id);
int (*power_state)(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state);
enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
@@ -132,8 +156,12 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
-int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler);
+int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+ enum vga_switcheroo_handler_flags_t handler_flags);
void vga_switcheroo_unregister_handler(void);
+enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void);
+int vga_switcheroo_lock_ddc(struct pci_dev *pdev);
+int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
int vga_switcheroo_process_delayed_switch(void);
@@ -150,11 +178,15 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
static inline int vga_switcheroo_register_client(struct pci_dev *dev,
const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
-static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; }
+static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+ enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
enum vga_switcheroo_client_id id) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
+static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
+static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
+static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
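For illustration, a platform handler that can switch the DDC lines separately might register itself along these lines (my_switchto, my_switch_ddc and my_get_client_id are hypothetical callbacks):

/* Sketch: declare the DDC capability when registering the handler. */
static const struct vga_switcheroo_handler my_handler = {
	.switchto	= my_switchto,
	.switch_ddc	= my_switch_ddc,
	.get_client_id	= my_get_client_id,
};

static int __init my_handler_init(void)
{
	return vga_switcheroo_register_handler(&my_handler,
					       VGA_SWITCHEROO_CAN_SWITCH_DDC);
}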
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 8f4d4bfa6d46..d5eb5479a425 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -75,8 +75,27 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
bool virtqueue_is_broken(struct virtqueue *vq);
-void *virtqueue_get_avail(struct virtqueue *vq);
-void *virtqueue_get_used(struct virtqueue *vq);
+const struct vring *virtqueue_get_vring(struct virtqueue *vq);
+dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq);
+dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq);
+dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq);
+
+/*
+ * Legacy accessors -- in almost all cases, these are the wrong functions
+ * to use.
+ */
+static inline void *virtqueue_get_desc(struct virtqueue *vq)
+{
+ return virtqueue_get_vring(vq)->desc;
+}
+static inline void *virtqueue_get_avail(struct virtqueue *vq)
+{
+ return virtqueue_get_vring(vq)->avail;
+}
+static inline void *virtqueue_get_used(struct virtqueue *vq)
+{
+ return virtqueue_get_vring(vq)->used;
+}
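As an illustrative sketch, a transport that must program the ring's DMA addresses into device registers could use the new accessors like this; the register offsets and the writeq() calls are hypothetical:

/* Sketch: hand the descriptor/avail/used ring addresses to the device. */
static void my_setup_vq_regs(struct virtqueue *vq, void __iomem *regs)
{
	writeq(virtqueue_get_desc_addr(vq),  regs + 0x00);
	writeq(virtqueue_get_avail_addr(vq), regs + 0x08);
	writeq(virtqueue_get_used_addr(vq),  regs + 0x10);
}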
/**
* virtio_device - representation of a device using virtio
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index a156e2b6ccfe..e8d36938f09a 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -59,6 +59,35 @@ static inline void virtio_store_mb(bool weak_barriers,
struct virtio_device;
struct virtqueue;
+/*
+ * Creates a virtqueue and allocates the descriptor ring. If
+ * may_reduce_num is set, then this may allocate a smaller ring than
+ * expected. The caller should query virtqueue_get_vring_size to learn
+ * the actual size of the ring.
+ */
+struct virtqueue *vring_create_virtqueue(unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool (*notify)(struct virtqueue *vq),
+ void (*callback)(struct virtqueue *vq),
+ const char *name);
+
+/* Creates a virtqueue with a custom layout. */
+struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring vring,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name);
+
+/*
+ * Creates a virtqueue with a standard layout but a caller-allocated
+ * ring.
+ */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
unsigned int vring_align,
@@ -68,7 +97,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name);
+
+/*
+ * Destroys a virtqueue. If created with vring_create_virtqueue, this
+ * also frees the ring.
+ */
void vring_del_virtqueue(struct virtqueue *vq);
+
/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);
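A hedged sketch of a transport creating a ring with may_reduce_num set and then querying the size it actually got; my_notify/my_callback, the queue size and the alignment are placeholders:

/* Sketch: let the core shrink the ring if the allocation fails, then
 * report the size that was actually obtained.
 */
static struct virtqueue *my_create_vq(struct virtio_device *vdev, int index)
{
	struct virtqueue *vq;

	vq = vring_create_virtqueue(index, 256, SMP_CACHE_BYTES, vdev,
				    true,	/* weak_barriers */
				    true,	/* may_reduce_num */
				    my_notify, my_callback, "my-vq");
	if (vq)
		dev_info(&vdev->dev, "ring size %u\n",
			 virtqueue_get_vring_size(vq));
	return vq;
}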
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 67c1dbd19c6d..ec084321fe09 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -53,6 +53,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
COMPACTISOLATED,
COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+ KCOMPACTD_WAKE,
#endif
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
@@ -71,6 +72,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_COLLAPSE_ALLOC_FAILED,
THP_SPLIT_PAGE,
THP_SPLIT_PAGE_FAILED,
+ THP_DEFERRED_SPLIT_PAGE,
THP_SPLIT_PMD,
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 65ac54c61c18..1bd31a38c51e 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -734,6 +734,41 @@ static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
}
/*
+ * Helper to read a value from a head or tail pointer. For X86_32, the
+ * pointer is treated as a 32bit value, since the pointer value
+ * never exceeds a 32bit value in this case. Also, doing an
+ * atomic64_read on X86_32 uniprocessor systems may be implemented
+ * as a non locked cmpxchg8b, that may end up overwriting updates done
+ * by the VMCI device to the memory location. On 32bit SMP, the lock
+ * prefix will be used, so correctness isn't an issue, but using a
+ * 64bit operation still adds unnecessary overhead.
+ */
+static inline u64 vmci_q_read_pointer(atomic64_t *var)
+{
+#if defined(CONFIG_X86_32)
+ return atomic_read((atomic_t *)var);
+#else
+ return atomic64_read(var);
+#endif
+}
+
+/*
+ * Helper to set the value of a head or tail pointer. For X86_32, the
+ * pointer is treated as a 32bit value, since the pointer value
+ * never exceeds a 32bit value in this case. On 32bit SMP, using a
+ * locked cmpxchg8b adds unnecessary overhead.
+ */
+static inline void vmci_q_set_pointer(atomic64_t *var,
+ u64 new_val)
+{
+#if defined(CONFIG_X86_32)
+ return atomic_set((atomic_t *)var, (u32)new_val);
+#else
+ return atomic64_set(var, new_val);
+#endif
+}
+
+/*
* Helper to add a given offset to a head or tail pointer. Wraps the
* value of the pointer around the max size of the queue.
*/
@@ -741,14 +776,14 @@ static inline void vmci_qp_add_pointer(atomic64_t *var,
size_t add,
u64 size)
{
- u64 new_val = atomic64_read(var);
+ u64 new_val = vmci_q_read_pointer(var);
if (new_val >= size - add)
new_val -= size;
new_val += add;
- atomic64_set(var, new_val);
+ vmci_q_set_pointer(var, new_val);
}
/*
@@ -758,7 +793,7 @@ static inline u64
vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
{
struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
- return atomic64_read(&qh->producer_tail);
+ return vmci_q_read_pointer(&qh->producer_tail);
}
/*
@@ -768,7 +803,7 @@ static inline u64
vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
{
struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
- return atomic64_read(&qh->consumer_head);
+ return vmci_q_read_pointer(&qh->consumer_head);
}
/*
diff --git a/include/linux/wait.h b/include/linux/wait.h
index ae71a769b89e..27d7a0ab5da3 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -338,7 +338,7 @@ do { \
schedule(); try_to_freeze())
/**
- * wait_event - sleep (or freeze) until a condition gets true
+ * wait_event_freezable - sleep (or freeze) until a condition gets true
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index b585fa2507ee..51732d6c9555 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -10,8 +10,9 @@
#include <linux/bitops.h>
-#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
#include <linux/notifier.h>
#include <uapi/linux/watchdog.h>
@@ -46,7 +47,7 @@ struct watchdog_ops {
unsigned int (*status)(struct watchdog_device *);
int (*set_timeout)(struct watchdog_device *, unsigned int);
unsigned int (*get_timeleft)(struct watchdog_device *);
- int (*restart)(struct watchdog_device *);
+ int (*restart)(struct watchdog_device *, unsigned long, void *);
long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
};
@@ -61,14 +62,21 @@ struct watchdog_ops {
* @bootstatus: Status of the watchdog device at boot.
* @timeout: The watchdog devices timeout value (in seconds).
* @min_timeout:The watchdog devices minimum timeout value (in seconds).
- * @max_timeout:The watchdog devices maximum timeout value (in seconds).
+ * @max_timeout:The watchdog devices maximum timeout value (in seconds)
+ * as configurable from user space. Only relevant if
+ * max_hw_heartbeat_ms is not provided.
+ * @min_hw_heartbeat_ms:
+ * Minimum time between heartbeats, in milli-seconds.
+ * @max_hw_heartbeat_ms:
+ * Hardware limit for maximum timeout, in milli-seconds.
+ * Replaces max_timeout if specified.
* @reboot_nb: The notifier block to stop watchdog on reboot.
* @restart_nb: The notifier block to register a restart function.
* @driver_data:Pointer to the drivers private data.
* @wd_data: Pointer to watchdog core internal data.
* @status: Field that contains the devices internal status bits.
- * @deferred: entry in wtd_deferred_reg_list which is used to
- * register early initialized watchdogs.
+ * @deferred: Entry in wtd_deferred_reg_list which is used to
+ * register early initialized watchdogs.
*
* The watchdog_device structure contains all information about a
* watchdog timer device.
@@ -89,6 +97,8 @@ struct watchdog_device {
unsigned int timeout;
unsigned int min_timeout;
unsigned int max_timeout;
+ unsigned int min_hw_heartbeat_ms;
+ unsigned int max_hw_heartbeat_ms;
struct notifier_block reboot_nb;
struct notifier_block restart_nb;
void *driver_data;
@@ -98,6 +108,7 @@ struct watchdog_device {
#define WDOG_ACTIVE 0 /* Is the watchdog running/active */
#define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? */
#define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */
+#define WDOG_HW_RUNNING 3 /* True if HW watchdog running */
struct list_head deferred;
};
@@ -110,6 +121,15 @@ static inline bool watchdog_active(struct watchdog_device *wdd)
return test_bit(WDOG_ACTIVE, &wdd->status);
}
+/*
+ * Use the following function to check whether or not the hardware watchdog
+ * is running
+ */
+static inline bool watchdog_hw_running(struct watchdog_device *wdd)
+{
+ return test_bit(WDOG_HW_RUNNING, &wdd->status);
+}
+
/* Use the following function to set the nowayout feature */
static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout)
{
@@ -128,13 +148,18 @@ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigne
{
/*
* The timeout is invalid if
+ * - the requested value is larger than UINT_MAX / 1000
+ * (since internal calculations are done in milli-seconds),
+ * or
* - the requested value is smaller than the configured minimum timeout,
* or
- * - a maximum timeout is configured, and the requested value is larger
- * than the maximum timeout.
+ * - a maximum hardware timeout is not configured, a maximum timeout
+ * is configured, and the requested value is larger than the
+ * configured maximum timeout.
*/
- return t < wdd->min_timeout ||
- (wdd->max_timeout && t > wdd->max_timeout);
+ return t > UINT_MAX / 1000 || t < wdd->min_timeout ||
+ (!wdd->max_hw_heartbeat_ms && wdd->max_timeout &&
+ t > wdd->max_timeout);
}
/* Use the following functions to manipulate watchdog driver specific data */
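A hedged sketch of a driver filling in the new heartbeat fields; the numbers, my_wdt_info and my_wdt_ops are illustrative only:

/* Sketch: hardware that can only time out after at most 8 seconds. The
 * watchdog core can then emulate longer user-visible timeouts by pinging
 * the hardware itself, and will not ping more often than every 500 ms.
 */
static struct watchdog_device my_wdd = {
	.info			= &my_wdt_info,
	.ops			= &my_wdt_ops,
	.min_timeout		= 1,
	.max_hw_heartbeat_ms	= 8000,	/* HW limit, used instead of max_timeout */
	.min_hw_heartbeat_ms	= 500,
};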
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index b333c945e571..d0b5ca5d4e08 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -198,6 +198,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_io(struct writeback_control *wbc, struct page *page,
size_t bytes);
+void cgroup_writeback_umount(void);
/**
* inode_attach_wb - associate an inode with its wb
@@ -301,6 +302,10 @@ static inline void wbc_account_io(struct writeback_control *wbc,
{
}
+static inline void cgroup_writeback_umount(void)
+{
+}
+
#endif /* CONFIG_CGROUP_WRITEBACK */
/*