Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/uaccess-unaligned.h | 26
-rw-r--r--  include/drm/bridge/dw_hdmi.h | 70
-rw-r--r--  include/dt-bindings/clock/boston-clock.h | 14
-rw-r--r--  include/linux/backing-dev.h | 24
-rw-r--r--  include/linux/binfmts.h | 4
-rw-r--r--  include/linux/bpf-cgroup.h | 2
-rw-r--r--  include/linux/bpf_verifier.h | 1
-rw-r--r--  include/linux/cdev.h | 2
-rw-r--r--  include/linux/ceph/ceph_features.h | 8
-rw-r--r--  include/linux/clk.h | 22
-rw-r--r--  include/linux/compiler-gcc.h | 13
-rw-r--r--  include/linux/compiler.h | 5
-rw-r--r--  include/linux/cpu_cooling.h | 32
-rw-r--r--  include/linux/cpufreq.h | 14
-rw-r--r--  include/linux/crash_core.h | 7
-rw-r--r--  include/linux/cred.h | 4
-rw-r--r--  include/linux/dcache.h | 11
-rw-r--r--  include/linux/eventpoll.h | 5
-rw-r--r--  include/linux/flat.h | 2
-rw-r--r--  include/linux/fs.h | 28
-rw-r--r--  include/linux/fs_struct.h | 2
-rw-r--r--  include/linux/fwnode.h | 4
-rw-r--r--  include/linux/gfp.h | 56
-rw-r--r--  include/linux/hugetlb.h | 3
-rw-r--r--  include/linux/ipc.h | 5
-rw-r--r--  include/linux/ipc_namespace.h | 2
-rw-r--r--  include/linux/jhash.h | 29
-rw-r--r--  include/linux/kernel.h | 10
-rw-r--r--  include/linux/kexec.h | 2
-rw-r--r--  include/linux/key-type.h | 4
-rw-r--r--  include/linux/kmod.h | 2
-rw-r--r--  include/linux/kobject.h | 2
-rw-r--r--  include/linux/kvm_host.h | 17
-rw-r--r--  include/linux/llist.h | 21
-rw-r--r--  include/linux/lockd/lockd.h | 4
-rw-r--r--  include/linux/lockd/xdr.h | 26
-rw-r--r--  include/linux/lockd/xdr4.h | 26
-rw-r--r--  include/linux/lsm_hooks.h | 4
-rw-r--r--  include/linux/migrate.h | 2
-rw-r--r--  include/linux/mlx4/device.h | 10
-rw-r--r--  include/linux/mm_types.h | 4
-rw-r--r--  include/linux/module.h | 4
-rw-r--r--  include/linux/mount.h | 2
-rw-r--r--  include/linux/msg.h | 2
-rw-r--r--  include/linux/mtd/nand.h | 80
-rw-r--r--  include/linux/mtd/partitions.h | 7
-rw-r--r--  include/linux/mtd/spi-nor.h | 161
-rw-r--r--  include/linux/net.h | 2
-rw-r--r--  include/linux/netfilter.h | 9
-rw-r--r--  include/linux/nfs4.h | 1
-rw-r--r--  include/linux/nfs_fs.h | 1
-rw-r--r--  include/linux/nfs_fs_sb.h | 2
-rw-r--r--  include/linux/nfs_page.h | 4
-rw-r--r--  include/linux/nfs_xdr.h | 31
-rw-r--r--  include/linux/nmi.h | 57
-rw-r--r--  include/linux/ntb.h | 721
-rw-r--r--  include/linux/nvmem-provider.h | 3
-rw-r--r--  include/linux/once.h | 2
-rw-r--r--  include/linux/path.h | 2
-rw-r--r--  include/linux/pid_namespace.h | 2
-rw-r--r--  include/linux/platform_data/usb-ohci-s3c2410.h | 2
-rw-r--r--  include/linux/proc_ns.h | 2
-rw-r--r--  include/linux/random.h | 47
-rw-r--r--  include/linux/rtc.h | 21
-rw-r--r--  include/linux/sched.h | 17
-rw-r--r--  include/linux/sched/signal.h | 2
-rw-r--r--  include/linux/sem.h | 26
-rw-r--r--  include/linux/shm.h | 2
-rw-r--r--  include/linux/slab.h | 3
-rw-r--r--  include/linux/string.h | 202
-rw-r--r--  include/linux/sunrpc/clnt.h | 6
-rw-r--r--  include/linux/sunrpc/sched.h | 2
-rw-r--r--  include/linux/sunrpc/svc.h | 23
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 46
-rw-r--r--  include/linux/sunrpc/xdr.h | 15
-rw-r--r--  include/linux/sysctl.h | 7
-rw-r--r--  include/linux/tty.h | 2
-rw-r--r--  include/linux/tty_driver.h | 4
-rw-r--r--  include/linux/usb/cdc_ncm.h | 1
-rw-r--r--  include/linux/user_namespace.h | 2
-rw-r--r--  include/linux/utsname.h | 2
-rw-r--r--  include/linux/vfio.h | 2
-rw-r--r--  include/net/9p/client.h | 13
-rw-r--r--  include/net/9p/transport.h | 1
-rw-r--r--  include/net/af_unix.h | 2
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/net_namespace.h | 2
-rw-r--r--  include/net/netlink.h | 4
-rw-r--r--  include/net/sctp/sctp.h | 4
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/rdma/ib_verbs.h | 18
-rw-r--r--  include/rdma/rdma_vt.h | 5
-rw-r--r--  include/scsi/scsi_proto.h | 1
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 10
-rw-r--r--  include/target/target_core_backend.h | 17
-rw-r--r--  include/target/target_core_base.h | 11
-rw-r--r--  include/target/target_core_fabric.h | 1
-rw-r--r--  include/trace/events/mmflags.h | 2
-rw-r--r--  include/uapi/linux/kcmp.h | 10
-rw-r--r--  include/uapi/linux/kvm.h | 4
-rw-r--r--  include/uapi/linux/sem.h | 2
-rw-r--r--  include/uapi/linux/target_core_user.h | 12
102 files changed, 1672 insertions(+), 509 deletions(-)
diff --git a/include/asm-generic/uaccess-unaligned.h b/include/asm-generic/uaccess-unaligned.h
deleted file mode 100644
index 67deb898f0c5..000000000000
--- a/include/asm-generic/uaccess-unaligned.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ASM_GENERIC_UACCESS_UNALIGNED_H
-#define __ASM_GENERIC_UACCESS_UNALIGNED_H
-
-/*
- * This macro should be used instead of __get_user() when accessing
- * values at locations that are not known to be aligned.
- */
-#define __get_user_unaligned(x, ptr) \
-({ \
- __typeof__ (*(ptr)) __x; \
- __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \
- (x) = __x; \
-})
-
-
-/*
- * This macro should be used instead of __put_user() when accessing
- * values at locations that are not known to be aligned.
- */
-#define __put_user_unaligned(x, ptr) \
-({ \
- __typeof__ (*(ptr)) __x = (x); \
- __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
-})
-
-#endif /* __ASM_GENERIC_UACCESS_UNALIGNED_H */
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 4c8d4c81a0e8..182f83283e24 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -22,56 +22,56 @@ struct dw_hdmi;
* 48bit bus.
*
* +----------------------+----------------------------------+------------------------------+
- * + Format Name + Format Code + Encodings +
+ * | Format Name | Format Code | Encodings |
* +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 8bit + ``MEDIA_BUS_FMT_RGB888_1X24`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * | RGB 4:4:4 8bit | ``MEDIA_BUS_FMT_RGB888_1X24`` | ``V4L2_YCBCR_ENC_DEFAULT`` |
* +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 10bits + ``MEDIA_BUS_FMT_RGB101010_1X30`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * | RGB 4:4:4 10bits | ``MEDIA_BUS_FMT_RGB101010_1X30`` | ``V4L2_YCBCR_ENC_DEFAULT`` |
* +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 12bits + ``MEDIA_BUS_FMT_RGB121212_1X36`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * | RGB 4:4:4 12bits | ``MEDIA_BUS_FMT_RGB121212_1X36`` | ``V4L2_YCBCR_ENC_DEFAULT`` |
* +----------------------+----------------------------------+------------------------------+
- * + RGB 4:4:4 16bits + ``MEDIA_BUS_FMT_RGB161616_1X48`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * | RGB 4:4:4 16bits | ``MEDIA_BUS_FMT_RGB161616_1X48`` | ``V4L2_YCBCR_ENC_DEFAULT`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 8bit + ``MEDIA_BUS_FMT_YUV8_1X24`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
- * + + + or ``V4L2_YCBCR_ENC_XV601`` +
- * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * | YCbCr 4:4:4 8bit | ``MEDIA_BUS_FMT_YUV8_1X24`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV601`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 10bits + ``MEDIA_BUS_FMT_YUV10_1X30`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
- * + + + or ``V4L2_YCBCR_ENC_XV601`` +
- * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * | YCbCr 4:4:4 10bits | ``MEDIA_BUS_FMT_YUV10_1X30`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV601`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 12bits + ``MEDIA_BUS_FMT_YUV12_1X36`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
- * + + + or ``V4L2_YCBCR_ENC_XV601`` +
- * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * | YCbCr 4:4:4 12bits | ``MEDIA_BUS_FMT_YUV12_1X36`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV601`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:4:4 16bits + ``MEDIA_BUS_FMT_YUV16_1X48`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
- * + + + or ``V4L2_YCBCR_ENC_XV601`` +
- * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * | YCbCr 4:4:4 16bits | ``MEDIA_BUS_FMT_YUV16_1X48`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV601`` |
+ * | | | or ``V4L2_YCBCR_ENC_XV709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:2 8bit + ``MEDIA_BUS_FMT_UYVY8_1X16`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:2 8bit | ``MEDIA_BUS_FMT_UYVY8_1X16`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:2 10bits + ``MEDIA_BUS_FMT_UYVY10_1X20`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:2 10bits | ``MEDIA_BUS_FMT_UYVY10_1X20`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:2 12bits + ``MEDIA_BUS_FMT_UYVY12_1X24`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:2 12bits | ``MEDIA_BUS_FMT_UYVY12_1X24`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 8bit + ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` + ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:0 8bit | ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` | ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 10bits + ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``+ ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:0 10bits | ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``| ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 12bits + ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``+ ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:0 12bits | ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``| ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
- * + YCbCr 4:2:0 16bits + ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``+ ``V4L2_YCBCR_ENC_601`` +
- * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * | YCbCr 4:2:0 16bits | ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``| ``V4L2_YCBCR_ENC_601`` |
+ * | | | or ``V4L2_YCBCR_ENC_709`` |
* +----------------------+----------------------------------+------------------------------+
*/
diff --git a/include/dt-bindings/clock/boston-clock.h b/include/dt-bindings/clock/boston-clock.h
new file mode 100644
index 000000000000..a6f009821137
--- /dev/null
+++ b/include/dt-bindings/clock/boston-clock.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__
+#define __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__
+
+#define BOSTON_CLK_INPUT 0
+#define BOSTON_CLK_SYS 1
+#define BOSTON_CLK_CPU 2
+
+#endif /* __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__ */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 334165c911f0..854e1bdd0b2a 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -69,34 +69,14 @@ static inline void __add_wb_stat(struct bdi_writeback *wb,
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}
-static inline void __inc_wb_stat(struct bdi_writeback *wb,
- enum wb_stat_item item)
-{
- __add_wb_stat(wb, item, 1);
-}
-
static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- unsigned long flags;
-
- local_irq_save(flags);
- __inc_wb_stat(wb, item);
- local_irq_restore(flags);
-}
-
-static inline void __dec_wb_stat(struct bdi_writeback *wb,
- enum wb_stat_item item)
-{
- __add_wb_stat(wb, item, -1);
+ __add_wb_stat(wb, item, 1);
}
static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- unsigned long flags;
-
- local_irq_save(flags);
- __dec_wb_stat(wb, item);
- local_irq_restore(flags);
+ __add_wb_stat(wb, item, -1);
}
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
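For context, a minimal sketch (not part of this diff) of what the backing-dev change means for callers: the irq-saving __inc_wb_stat()/__dec_wb_stat() wrappers are gone, so call sites simply use inc_wb_stat()/dec_wb_stat(), which now expand directly to __add_wb_stat(). WB_WRITEBACK is an existing wb_stat_item value; the surrounding function is hypothetical.

#include <linux/backing-dev.h>

/* Hypothetical helper: account one page entering writeback on @wb. */
static void example_account_writeback(struct bdi_writeback *wb)
{
	/*
	 * Previously this needed __inc_wb_stat() under local_irq_save();
	 * after this change inc_wb_stat() is a plain wrapper around
	 * __add_wb_stat(wb, WB_WRITEBACK, 1).
	 */
	inc_wb_stat(wb, WB_WRITEBACK);
}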
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 05488da3aee9..3ae9013eeaaa 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -46,7 +46,7 @@ struct linux_binprm {
unsigned interp_flags;
unsigned interp_data;
unsigned long loader, exec;
-};
+} __randomize_layout;
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
@@ -81,7 +81,7 @@ struct linux_binfmt {
int (*load_shlib)(struct file *);
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
-};
+} __randomize_layout;
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 360c082e885c..d41d40ac3efd 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -85,7 +85,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
int __ret = 0; \
if (cgroup_bpf_enabled && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
- if (sk_fullsock(__sk)) \
+ if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
BPF_CGROUP_SOCK_OPS); \
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 621076f56251..8e5d31f6faef 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -43,6 +43,7 @@ struct bpf_reg_state {
u32 min_align;
u32 aux_off;
u32 aux_off_align;
+ bool value_from_signed;
};
enum bpf_stack_slot_type {
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index 408bc09ce497..cb28eb21e3ca 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -17,7 +17,7 @@ struct cdev {
struct list_head list;
dev_t dev;
unsigned int count;
-};
+} __randomize_layout;
void cdev_init(struct cdev *, const struct file_operations *);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index f0f6c537b64c..040dd105c3e7 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -10,14 +10,14 @@
#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL
#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \
- const static uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
- const static uint64_t CEPH_FEATUREMASK_##name = \
+ static const uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/* this bit is ignored but still advertised by release *when* */
#define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \
- const static uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
- const static uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
+ static const uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/*
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 91bd464f4c9b..12c96d94d1fa 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -657,6 +657,28 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
+static inline int clk_bulk_prepare_enable(int num_clks,
+ struct clk_bulk_data *clks)
+{
+ int ret;
+
+ ret = clk_bulk_prepare(num_clks, clks);
+ if (ret)
+ return ret;
+ ret = clk_bulk_enable(num_clks, clks);
+ if (ret)
+ clk_bulk_unprepare(num_clks, clks);
+
+ return ret;
+}
+
+static inline void clk_bulk_disable_unprepare(int num_clks,
+ struct clk_bulk_data *clks)
+{
+ clk_bulk_disable(num_clks, clks);
+ clk_bulk_unprepare(num_clks, clks);
+}
+
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
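A usage sketch for the new bulk helpers above (not part of the diff); it assumes the clk_bulk_get()/clk_bulk_put() accessors from the same clk-bulk series, and the clock names and probe/remove functions are hypothetical.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

static struct clk_bulk_data example_clks[] = {
	{ .id = "bus" },	/* hypothetical clock names */
	{ .id = "core" },
};

static int example_probe(struct device *dev)
{
	int ret;

	ret = clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
	if (ret)
		return ret;

	/* Prepare then enable all clocks; the helper unwinds the prepare
	 * step itself if enabling fails. */
	ret = clk_bulk_prepare_enable(ARRAY_SIZE(example_clks), example_clks);
	if (ret)
		clk_bulk_put(ARRAY_SIZE(example_clks), example_clks);

	return ret;
}

static void example_remove(struct device *dev)
{
	clk_bulk_disable_unprepare(ARRAY_SIZE(example_clks), example_clks);
	clk_bulk_put(ARRAY_SIZE(example_clks), example_clks);
}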
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cd4bbe8242bd..bdb80c4aef6e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -235,6 +235,7 @@
#endif /* GCC_VERSION >= 40500 */
#if GCC_VERSION >= 40600
+
/*
* When used with Link Time Optimization, gcc can optimize away C functions or
* variables which are referenced only from assembly code. __visible tells the
@@ -242,7 +243,17 @@
* this.
*/
#define __visible __attribute__((externally_visible))
-#endif
+
+/*
+ * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but it is only
+ * possible since GCC 4.6. To provide as much build testing coverage
+ * as possible, this is used for all GCC 4.6+ builds, and not just on
+ * RANDSTRUCT_PLUGIN builds.
+ */
+#define randomized_struct_fields_start struct {
+#define randomized_struct_fields_end } __randomize_layout;
+
+#endif /* GCC_VERSION >= 40600 */
#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 219f82f3ec1a..eca8ad75e28b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -452,6 +452,11 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __no_randomize_layout
#endif
+#ifndef randomized_struct_fields_start
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
+#endif
+
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
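For illustration (not from the diff): how the new markers are meant to be used. Only the fields between the markers end up inside the anonymous struct that the randstruct plugin may shuffle; the structure and field names here are hypothetical.

#include <linux/compiler.h>

struct example_ctx {
	/* Fields that must keep a fixed offset stay outside the block. */
	unsigned long fixed_flags;

	randomized_struct_fields_start

	/* Everything in here may be reordered when randstruct is enabled;
	 * on GCC < 4.6 or other compilers the markers expand to nothing. */
	void *priv;
	int state;

	randomized_struct_fields_end
};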
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c156f5082758..d4292ebc5c8b 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -28,47 +28,49 @@
#include <linux/thermal.h>
#include <linux/cpumask.h>
+struct cpufreq_policy;
+
typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
unsigned long voltage, u32 *power);
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @policy: cpufreq policy.
*/
struct thermal_cooling_device *
-cpufreq_cooling_register(const struct cpumask *clip_cpus);
+cpufreq_cooling_register(struct cpufreq_policy *policy);
struct thermal_cooling_device *
-cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
+cpufreq_power_cooling_register(struct cpufreq_policy *policy,
u32 capacitance, get_static_t plat_static_func);
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
* @np: a valid struct device_node to the cooling device device tree node.
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @policy: cpufreq policy.
*/
#ifdef CONFIG_THERMAL_OF
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus);
+ struct cpufreq_policy *policy);
struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus,
+ struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func);
#else
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus)
+ struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus,
+ struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func)
{
@@ -82,15 +84,14 @@ of_cpufreq_power_cooling_register(struct device_node *np,
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
-unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
#else /* !CONFIG_CPU_THERMAL */
static inline struct thermal_cooling_device *
-cpufreq_cooling_register(const struct cpumask *clip_cpus)
+cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
-cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
+cpufreq_power_cooling_register(struct cpufreq_policy *policy,
u32 capacitance, get_static_t plat_static_func)
{
return NULL;
@@ -98,14 +99,14 @@ cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus)
+ struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus,
+ struct cpufreq_policy *policy,
u32 capacitance,
get_static_t plat_static_func)
{
@@ -117,11 +118,6 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
return;
}
-static inline
-unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
-{
- return THERMAL_CSTATE_INVALID;
-}
#endif /* CONFIG_CPU_THERMAL */
#endif /* __CPU_COOLING_H__ */
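A sketch of the new registration flow (hypothetical driver function, not part of the diff): the cooling device is now registered against a cpufreq_policy rather than a cpumask, e.g. once a cpufreq driver has the policy in hand.

#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/err.h>

static struct thermal_cooling_device *example_cdev;

/* Hypothetical hook called once @policy has been created. */
static void example_register_cooling(struct cpufreq_policy *policy)
{
	example_cdev = cpufreq_cooling_register(policy);
	if (IS_ERR(example_cdev)) {
		pr_warn("cpufreq cooling registration failed: %ld\n",
			PTR_ERR(example_cdev));
		example_cdev = NULL;
	}
}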
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 905117bd5012..f10a9b3761cd 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -862,6 +862,20 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
return -EINVAL;
}
}
+
+static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos;
+ int count = 0;
+
+ if (unlikely(!policy->freq_table))
+ return 0;
+
+ cpufreq_for_each_valid_entry(pos, policy->freq_table)
+ count++;
+
+ return count;
+}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
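A hypothetical use of the cpufreq_table_count_valid_entries() helper added above (not part of the diff): sizing a per-frequency array from a policy's frequency table.

#include <linux/cpufreq.h>
#include <linux/slab.h>

/* Hypothetical: allocate one counter per valid frequency table entry. */
static u32 *example_alloc_freq_counters(struct cpufreq_policy *policy)
{
	int n = cpufreq_table_count_valid_entries(policy);

	if (!n)		/* no frequency table, or an empty one */
		return NULL;

	return kcalloc(n, sizeof(u32), GFP_KERNEL);
}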
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 4090a42578a8..2df2118fbe13 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -19,7 +19,7 @@
CRASH_CORE_NOTE_NAME_BYTES + \
CRASH_CORE_NOTE_DESC_BYTES)
-#define VMCOREINFO_BYTES (4096)
+#define VMCOREINFO_BYTES PAGE_SIZE
#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
@@ -28,6 +28,7 @@
typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
+void crash_update_vmcoreinfo_safecopy(void *ptr);
void crash_save_vmcoreinfo(void);
void arch_crash_save_vmcoreinfo(void);
__printf(1, 2)
@@ -56,9 +57,7 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_CONFIG(name) \
vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
-extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
-extern size_t vmcoreinfo_size;
-extern size_t vmcoreinfo_max_size;
+extern u32 *vmcoreinfo_note;
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len);
diff --git a/include/linux/cred.h b/include/linux/cred.h
index c728d515e5e2..099058e1178b 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -31,7 +31,7 @@ struct group_info {
atomic_t usage;
int ngroups;
kgid_t gid[0];
-};
+} __randomize_layout;
/**
* get_group_info - Get a reference to a group info structure
@@ -145,7 +145,7 @@ struct cred {
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
struct rcu_head rcu; /* RCU deletion hook */
-};
+} __randomize_layout;
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 025727bf6797..aae1cdb76851 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -55,6 +55,11 @@ struct qstr {
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
+extern const char empty_string[];
+extern const struct qstr empty_name;
+extern const char slash_string[];
+extern const struct qstr slash_name;
+
struct dentry_stat_t {
long nr_dentry;
long nr_unused;
@@ -113,7 +118,7 @@ struct dentry {
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
-};
+} __randomize_layout;
/*
* dentry->d_lock spinlock nesting subclasses:
@@ -592,8 +597,8 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
}
struct name_snapshot {
- const char *name;
- char inline_name[DNAME_INLINE_LEN];
+ const unsigned char *name;
+ unsigned char inline_name[DNAME_INLINE_LEN];
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 6daf6d4971f6..2f14ac73d01d 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -14,6 +14,7 @@
#define _LINUX_EVENTPOLL_H
#include <uapi/linux/eventpoll.h>
+#include <uapi/linux/kcmp.h>
/* Forward declarations to avoid compiler errors */
@@ -22,6 +23,10 @@ struct file;
#ifdef CONFIG_EPOLL
+#ifdef CONFIG_CHECKPOINT_RESTORE
+struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
+#endif
+
/* Used to initialize the epoll bits inside the "struct file" */
static inline void eventpoll_init_file(struct file *file)
{
diff --git a/include/linux/flat.h b/include/linux/flat.h
index 2c1eb15c4ba4..7d542dfd0def 100644
--- a/include/linux/flat.h
+++ b/include/linux/flat.h
@@ -9,8 +9,8 @@
#ifndef _LINUX_FLAT_H
#define _LINUX_FLAT_H
-#include <asm/flat.h>
#include <uapi/linux/flat.h>
+#include <asm/flat.h>
/*
* While it would be nice to keep this header clean, users of older
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 976aaa1af82a..6e1fd5d21248 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -296,7 +296,7 @@ struct kiocb {
void *private;
int ki_flags;
enum rw_hint ki_hint;
-};
+} __randomize_layout;
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
@@ -404,7 +404,7 @@ struct address_space {
struct list_head private_list; /* ditto */
void *private_data; /* ditto */
errseq_t wb_err;
-} __attribute__((aligned(sizeof(long))));
+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least significant bit
@@ -447,7 +447,7 @@ struct block_device {
int bd_fsfreeze_count;
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
-};
+} __randomize_layout;
/*
* Radix-tree tags, for tagging dirty and writeback pages within the pagecache
@@ -666,7 +666,7 @@ struct inode {
#endif
void *i_private; /* fs or device private pointer */
-};
+} __randomize_layout;
static inline unsigned int i_blocksize(const struct inode *node)
{
@@ -883,7 +883,8 @@ struct file {
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
errseq_t f_wb_err;
-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
+} __randomize_layout
+ __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
struct file_handle {
__u32 handle_bytes;
@@ -1020,7 +1021,7 @@ struct file_lock {
int state; /* state of grant or error if -ve */
} afs;
} fl_u;
-};
+} __randomize_layout;
struct file_lock_context {
spinlock_t flc_lock;
@@ -1364,11 +1365,6 @@ struct super_block {
*/
char *s_subtype;
- /*
- * Saved mount options for lazy filesystems using
- * generic_show_options()
- */
- char __rcu *s_options;
const struct dentry_operations *s_d_op; /* default d_op for dentries */
/*
@@ -1417,7 +1413,7 @@ struct super_block {
spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb; /* writeback inodes */
-};
+} __randomize_layout;
/* Helper functions so that in most cases filesystems will
* not need to deal directly with kuid_t and kgid_t and can
@@ -1703,7 +1699,7 @@ struct file_operations {
u64);
ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *,
u64);
-};
+} __randomize_layout;
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
@@ -3046,7 +3042,7 @@ extern int generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo, u64 start,
u64 len, get_block_t *get_block);
-extern void get_filesystem(struct file_system_type *fs);
+extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
@@ -3123,10 +3119,6 @@ extern void setattr_copy(struct inode *inode, const struct iattr *attr);
extern int file_update_time(struct file *file);
-extern int generic_show_options(struct seq_file *m, struct dentry *root);
-extern void save_mount_options(struct super_block *sb, char *options);
-extern void replace_mount_options(struct super_block *sb, char *options);
-
static inline bool io_is_direct(struct file *filp)
{
return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 0efc3e62843a..7a026240cbb1 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -12,7 +12,7 @@ struct fs_struct {
int umask;
int in_exec;
struct path root, pwd;
-};
+} __randomize_layout;
extern struct kmem_cache *fs_cachep;
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 9ab375419189..50893a1646cf 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -99,6 +99,10 @@ struct fwnode_operations {
(fwnode ? (fwnode_has_op(fwnode, op) ? \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \
-EINVAL)
+#define fwnode_call_bool_op(fwnode, op, ...) \
+ (fwnode ? (fwnode_has_op(fwnode, op) ? \
+ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) : \
+ false)
#define fwnode_call_ptr_op(fwnode, op, ...) \
(fwnode_has_op(fwnode, op) ? \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__) : NULL)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4c6656f1fee7..bcfb9f7c46f5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -25,7 +25,7 @@ struct vm_area_struct;
#define ___GFP_FS 0x80u
#define ___GFP_COLD 0x100u
#define ___GFP_NOWARN 0x200u
-#define ___GFP_REPEAT 0x400u
+#define ___GFP_RETRY_MAYFAIL 0x400u
#define ___GFP_NOFAIL 0x800u
#define ___GFP_NORETRY 0x1000u
#define ___GFP_MEMALLOC 0x2000u
@@ -136,26 +136,56 @@ struct vm_area_struct;
*
* __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
- * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
- * _might_ fail. This depends upon the particular VM implementation.
+ * The default allocator behavior depends on the request size. We have a concept
+ * of so called costly allocations (with order > PAGE_ALLOC_COSTLY_ORDER).
+ * !costly allocations are too essential to fail so they are implicitly
+ * non-failing by default (with some exceptions like OOM victims might fail so
+ * the caller still has to check for failures) while costly requests try to be
+ * not disruptive and back off even without invoking the OOM killer.
+ * The following three modifiers might be used to override some of these
+ * implicit rules
+ *
+ * __GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like OOM killer. The
+ * caller must handle the failure which is quite likely to happen under
+ * heavy memory pressure. The flag is suitable when failure can easily be
+ * handled at small cost, such as reduced throughput
+ *
+ * __GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with __GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. New users should be evaluated carefully
- * (and the flag should be used only when there is no reasonable failure
- * policy) but it is definitely preferable to use the flag rather than
- * opencode endless loop around allocator.
- *
- * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
- * return NULL when direct reclaim and memory compaction have failed to allow
- * the allocation to succeed. The OOM killer is not called with the current
- * implementation.
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy) but it is
+ * definitely preferable to use the flag rather than opencode endless
+ * loop around allocator.
+ * Using this flag for costly allocations is _highly_ discouraged.
*/
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
-#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)
+#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
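A short hypothetical example of the new flag's semantics (not from the diff): a costly allocation that opts into harder retries but still provides a fallback, since __GFP_RETRY_MAYFAIL never guarantees success and does not directly trigger the OOM killer.

#include <linux/gfp.h>

/* Hypothetical: try hard for a large contiguous buffer, tolerate failure. */
static struct page *example_alloc_big_buffer(unsigned int order)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, order);
	if (!page && order > 0)
		page = alloc_pages(GFP_KERNEL, 0);	/* smaller fallback */

	return page;
}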
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 8d9fe131a240..0ed8e41aaf11 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -268,6 +268,9 @@ struct hugetlbfs_sb_info {
spinlock_t stat_lock;
struct hstate *hstate;
struct hugepage_subpool *spool;
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
};
static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 71fd92d81b26..fadd579d577d 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -20,6 +20,9 @@ struct kern_ipc_perm {
umode_t mode;
unsigned long seq;
void *security;
-} ____cacheline_aligned_in_smp;
+
+ struct rcu_head rcu;
+ atomic_t refcount;
+} ____cacheline_aligned_in_smp __randomize_layout;
#endif /* _LINUX_IPC_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 848e5796400e..65327ee0936b 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -61,7 +61,7 @@ struct ipc_namespace {
struct ucounts *ucounts;
struct ns_common ns;
-};
+} __randomize_layout;
extern struct ipc_namespace init_ipc_ns;
extern spinlock_t mq_lock;
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 348c6f47e4cc..8037850f3104 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -85,19 +85,18 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
k += 12;
}
/* Last block: affect all 32 bits of (c) */
- /* All the case statements fall through */
switch (length) {
- case 12: c += (u32)k[11]<<24;
- case 11: c += (u32)k[10]<<16;
- case 10: c += (u32)k[9]<<8;
- case 9: c += k[8];
- case 8: b += (u32)k[7]<<24;
- case 7: b += (u32)k[6]<<16;
- case 6: b += (u32)k[5]<<8;
- case 5: b += k[4];
- case 4: a += (u32)k[3]<<24;
- case 3: a += (u32)k[2]<<16;
- case 2: a += (u32)k[1]<<8;
+ case 12: c += (u32)k[11]<<24; /* fall through */
+ case 11: c += (u32)k[10]<<16; /* fall through */
+ case 10: c += (u32)k[9]<<8; /* fall through */
+ case 9: c += k[8]; /* fall through */
+ case 8: b += (u32)k[7]<<24; /* fall through */
+ case 7: b += (u32)k[6]<<16; /* fall through */
+ case 6: b += (u32)k[5]<<8; /* fall through */
+ case 5: b += k[4]; /* fall through */
+ case 4: a += (u32)k[3]<<24; /* fall through */
+ case 3: a += (u32)k[2]<<16; /* fall through */
+ case 2: a += (u32)k[1]<<8; /* fall through */
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
@@ -131,10 +130,10 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
k += 3;
}
- /* Handle the last 3 u32's: all the case statements fall through */
+ /* Handle the last 3 u32's */
switch (length) {
- case 3: c += k[2];
- case 2: b += k[1];
+ case 3: c += k[2]; /* fall through */
+ case 2: b += k[1]; /* fall through */
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
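For context (an illustration, not part of the diff): the "/* fall through */" comments follow the annotation style that static analysers and GCC's -Wimplicit-fallthrough heuristics recognise, marking each intentional drop-through explicitly, e.g.:

/* Hypothetical switch using the same annotation convention. */
static unsigned int example_tail_sum(const unsigned char *buf, int remaining)
{
	unsigned int sum = 0;

	switch (remaining) {
	case 2:
		sum += (unsigned int)buf[1] << 8;	/* fall through */
	case 1:
		sum += buf[0];
		break;
	case 0:
		break;
	}

	return sum;
}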
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1c91f26e2996..bd6d96cf80b1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -11,6 +11,7 @@
#include <linux/log2.h>
#include <linux/typecheck.h>
#include <linux/printk.h>
+#include <linux/build_bug.h>
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>
@@ -854,9 +855,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @member: the name of the member within the struct.
*
*/
-#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
+#define container_of(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
+ !__same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type *)(__mptr - offsetof(type, member))); })
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
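A small illustration of what the added type check catches (hypothetical types, not from the diff): container_of() now fails at build time when the pointer's type does not match the named member, instead of silently computing a bogus address.

#include <linux/kernel.h>
#include <linux/list.h>

struct example_item {
	int value;
	struct list_head node;
};

static struct example_item *example_from_node(struct list_head *n)
{
	/* OK: 'n' has the type of the 'node' member. */
	return container_of(n, struct example_item, node);
}

/*
 * By contrast, container_of(some_int_ptr, struct example_item, node) now
 * trips BUILD_BUG_ON_MSG("pointer type mismatch in container_of()").
 */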
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 65888418fb69..dd056fab9e35 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -172,6 +172,7 @@ struct kimage {
unsigned long start;
struct page *control_code_page;
struct page *swap_page;
+ void *vmcoreinfo_data_copy; /* locates in the crash memory */
unsigned long nr_segments;
struct kexec_segment segment[KEXEC_SEGMENT_MAX];
@@ -241,6 +242,7 @@ extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
int kexec_crash_loaded(void);
void crash_save_cpu(struct pt_regs *regs, int cpu);
+extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 8496cf64575c..9520fc3c3b9a 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,7 +45,7 @@ struct key_preparsed_payload {
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
time_t expiry; /* Expiry time of key */
-};
+} __randomize_layout;
typedef int (*request_key_actor_t)(struct key_construction *key,
const char *op, void *aux);
@@ -158,7 +158,7 @@ struct key_type {
/* internal fields */
struct list_head link; /* link in types list */
struct lock_class_key lock_class; /* key->sem lock class */
-};
+} __randomize_layout;
extern struct key_type key_type_keyring;
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index c4e441e00db5..655082c88fd9 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -64,7 +64,7 @@ struct subprocess_info {
int (*init)(struct subprocess_info *info, struct cred *new);
void (*cleanup)(struct subprocess_info *info);
void *data;
-};
+} __randomize_layout;
extern int
call_usermodehelper(const char *path, char **argv, char **envp, int wait);
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index eeab34b0f589..4d800c79475a 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -172,7 +172,7 @@ struct kset {
spinlock_t list_lock;
struct kobject kobj;
const struct kset_uevent_ops *uevent_ops;
-};
+} __randomize_layout;
extern void kset_init(struct kset *kset);
extern int __must_check kset_register(struct kset *kset);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0b50e7b35ed4..648b34cabb38 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -234,7 +234,7 @@ struct kvm_vcpu {
int guest_fpu_loaded, guest_xcr0_loaded;
struct swait_queue_head wq;
- struct pid *pid;
+ struct pid __rcu *pid;
int sigset_active;
sigset_t sigset;
struct kvm_vcpu_stat stat;
@@ -390,7 +390,7 @@ struct kvm {
spinlock_t mmu_lock;
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
+ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
/*
@@ -404,7 +404,7 @@ struct kvm {
int last_boosted_vcpu;
struct list_head vm_list;
struct mutex lock;
- struct kvm_io_bus *buses[KVM_NR_BUSES];
+ struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
struct {
spinlock_t lock;
@@ -473,6 +473,12 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
+{
+ return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
+ lockdep_is_held(&kvm->slots_lock));
+}
+
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
@@ -562,9 +568,8 @@ void kvm_put_kvm(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
- return rcu_dereference_check(kvm->memslots[as_id],
- srcu_read_lock_held(&kvm->srcu)
- || lockdep_is_held(&kvm->slots_lock));
+ return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
+ lockdep_is_held(&kvm->slots_lock));
}
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
diff --git a/include/linux/llist.h b/include/linux/llist.h
index d11738110a7a..1957635e6d5f 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -93,6 +93,23 @@ static inline void init_llist_head(struct llist_head *list)
container_of(ptr, type, member)
/**
+ * member_address_is_nonnull - check whether the member address is not NULL
+ * @ptr: the object pointer (struct type * that contains the llist_node)
+ * @member: the name of the llist_node within the struct.
+ *
+ * This macro is conceptually the same as
+ * &ptr->member != NULL
+ * but it works around the fact that compilers can decide that taking a member
+ * address is never a NULL pointer.
+ *
+ * Real objects that start at a high address and have a member at NULL are
+ * unlikely to exist, but such pointers may be returned e.g. by the
+ * container_of() macro.
+ */
+#define member_address_is_nonnull(ptr, member) \
+ ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)
+
+/**
* llist_for_each - iterate over some deleted entries of a lock-less list
* @pos: the &struct llist_node to use as a loop cursor
* @node: the first entry of deleted list entries
@@ -145,7 +162,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
#define llist_for_each_entry(pos, node, member) \
for ((pos) = llist_entry((node), typeof(*(pos)), member); \
- &(pos)->member != NULL; \
+ member_address_is_nonnull(pos, member); \
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
@@ -167,7 +184,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
#define llist_for_each_entry_safe(pos, n, node, member) \
for (pos = llist_entry((node), typeof(*pos), member); \
- &pos->member != NULL && \
+ member_address_is_nonnull(pos, member) && \
(n = llist_entry(pos->member.next, typeof(*n), member), true); \
pos = n)
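To make the problem above concrete (hypothetical type, not part of the diff): with a NULL terminating node, llist_entry() yields a bogus non-NULL object pointer, and "&pos->member != NULL" is a member-address comparison the compiler may assume is always true; member_address_is_nonnull() performs the same test as plain integer arithmetic, so the loops below terminate reliably.

#include <linux/llist.h>
#include <linux/printk.h>

struct example_event {
	int id;
	struct llist_node llnode;
};

static void example_drain(struct llist_head *head)
{
	struct llist_node *first = llist_del_all(head);
	struct example_event *ev;

	/* The loop condition is now member_address_is_nonnull(ev, llnode). */
	llist_for_each_entry(ev, first, llnode)
		pr_info("event %d\n", ev->id);
}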
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 41f7b6a04d69..3eca67728366 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -192,9 +192,9 @@ struct nlm_block {
* Global variables
*/
extern const struct rpc_program nlm_program;
-extern struct svc_procedure nlmsvc_procedures[];
+extern const struct svc_procedure nlmsvc_procedures[];
#ifdef CONFIG_LOCKD_V4
-extern struct svc_procedure nlmsvc_procedures4[];
+extern const struct svc_procedure nlmsvc_procedures4[];
#endif
extern int nlmsvc_grace_period;
extern unsigned long nlmsvc_timeout;
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index d39ed1cc5fbf..7acbecc21a40 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -95,19 +95,19 @@ struct nlm_reboot {
*/
#define NLMSVC_XDRSIZE sizeof(struct nlm_args)
-int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_encode_testres(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_encode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlmsvc_decode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlmsvc_encode_void(struct svc_rqst *, __be32 *, void *);
-int nlmsvc_decode_void(struct svc_rqst *, __be32 *, void *);
-int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlmsvc_decode_notify(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *, struct nlm_reboot *);
+int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *);
+int nlmsvc_encode_testres(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *);
+int nlmsvc_encode_res(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_res(struct svc_rqst *, __be32 *);
+int nlmsvc_encode_void(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_void(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *);
+int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_notify(struct svc_rqst *, __be32 *);
+int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *);
/*
int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index e58c88b52ce1..bf1645609225 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -23,19 +23,19 @@
-int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_encode_testres(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_encode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlm4svc_decode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlm4svc_encode_void(struct svc_rqst *, __be32 *, void *);
-int nlm4svc_decode_void(struct svc_rqst *, __be32 *, void *);
-int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *, struct nlm_res *);
-int nlm4svc_decode_notify(struct svc_rqst *, __be32 *, struct nlm_args *);
-int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *, struct nlm_reboot *);
+int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *);
+int nlm4svc_encode_testres(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *);
+int nlm4svc_encode_res(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_res(struct svc_rqst *, __be32 *);
+int nlm4svc_encode_void(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_void(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *);
+int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_notify(struct svc_rqst *, __be32 *);
+int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *);
/*
int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 7a86925ba8f3..3a90febadbe2 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1912,7 +1912,7 @@ struct security_hook_heads {
struct list_head audit_rule_match;
struct list_head audit_rule_free;
#endif /* CONFIG_AUDIT */
-};
+} __randomize_layout;
/*
* Security module hook list structure.
@@ -1923,7 +1923,7 @@ struct security_hook_list {
struct list_head *head;
union security_list_options hook;
char *lsm;
-};
+} __randomize_layout;
/*
* Initializing a security_hook_list structure takes
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 4634da521238..3e0d405dc842 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -34,7 +34,7 @@ extern char *migrate_reason_names[MR_TYPES];
static inline struct page *new_page_nodemask(struct page *page,
int preferred_nid, nodemask_t *nodemask)
{
- gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+ gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
if (PageHuge(page))
return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d5bed0875d30..aad5d81dfb44 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1068,7 +1068,7 @@ static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
}
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp);
+ struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
@@ -1105,10 +1105,9 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- struct mlx4_buf *buf, gfp_t gfp);
+ struct mlx4_buf *buf);
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
- gfp_t gfp);
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
@@ -1124,8 +1123,7 @@ int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 flags);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
- gfp_t gfp);
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 45cdb27791a3..ff151814a02d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -342,7 +342,7 @@ struct vm_area_struct {
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
-};
+} __randomize_layout;
struct core_thread {
struct task_struct *task;
@@ -500,7 +500,7 @@ struct mm_struct {
atomic_long_t hugetlb_usage;
#endif
struct work_struct async_put_work;
-};
+} __randomize_layout;
extern struct mm_struct init_mm;
diff --git a/include/linux/module.h b/include/linux/module.h
index 8eb9a1e693e5..e7bdd549e527 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -45,7 +45,7 @@ struct module_kobject {
struct kobject *drivers_dir;
struct module_param_attrs *mp;
struct completion *kobj_completion;
-};
+} __randomize_layout;
struct module_attribute {
struct attribute attr;
@@ -475,7 +475,7 @@ struct module {
ctor_fn_t *ctors;
unsigned int num_ctors;
#endif
-} ____cacheline_aligned;
+} ____cacheline_aligned __randomize_layout;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
#endif
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 8e0352af06b7..1ce85e6fd95f 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -67,7 +67,7 @@ struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
-};
+} __randomize_layout;
struct file; /* forward dec */
struct path;
diff --git a/include/linux/msg.h b/include/linux/msg.h
index f3f302f9c197..a001305f5a79 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -29,7 +29,7 @@ struct msg_queue {
struct list_head q_messages;
struct list_head q_receivers;
struct list_head q_senders;
-};
+} __randomize_layout;
/* Helper routines for sys_msgsnd and sys_msgrcv */
extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index de0d889e4fe1..892148c448cc 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -107,6 +107,8 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#define NAND_STATUS_READY 0x40
#define NAND_STATUS_WP 0x80
+#define NAND_DATA_IFACE_CHECK_ONLY -1
+
/*
* Constants for ECC_MODES
*/
@@ -116,6 +118,7 @@ typedef enum {
NAND_ECC_HW,
NAND_ECC_HW_SYNDROME,
NAND_ECC_HW_OOB_FIRST,
+ NAND_ECC_ON_DIE,
} nand_ecc_modes_t;
enum nand_ecc_algo {
@@ -257,6 +260,8 @@ struct nand_chip;
/* Vendor-specific feature address (Micron) */
#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
+#define ONFI_FEATURE_ON_DIE_ECC 0x90
+#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
/* ONFI subfeature parameters length */
#define ONFI_SUBFEATURE_PARAM_LEN 4
@@ -477,6 +482,44 @@ static inline void nand_hw_control_init(struct nand_hw_control *nfc)
}
/**
+ * struct nand_ecc_step_info - ECC step information of ECC engine
+ * @stepsize: data bytes per ECC step
+ * @strengths: array of supported strengths
+ * @nstrengths: number of supported strengths
+ */
+struct nand_ecc_step_info {
+ int stepsize;
+ const int *strengths;
+ int nstrengths;
+};
+
+/**
+ * struct nand_ecc_caps - capability of ECC engine
+ * @stepinfos: array of ECC step information
+ * @nstepinfos: number of ECC step information
+ * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step
+ */
+struct nand_ecc_caps {
+ const struct nand_ecc_step_info *stepinfos;
+ int nstepinfos;
+ int (*calc_ecc_bytes)(int step_size, int strength);
+};
+
+/* a shorthand to generate struct nand_ecc_caps with only one ECC stepsize */
+#define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...) \
+static const int __name##_strengths[] = { __VA_ARGS__ }; \
+static const struct nand_ecc_step_info __name##_stepinfo = { \
+ .stepsize = __step, \
+ .strengths = __name##_strengths, \
+ .nstrengths = ARRAY_SIZE(__name##_strengths), \
+}; \
+static const struct nand_ecc_caps __name = { \
+ .stepinfos = &__name##_stepinfo, \
+ .nstepinfos = 1, \
+ .calc_ecc_bytes = __calc, \
+}
+
+/**
* struct nand_ecc_ctrl - Control structure for ECC
* @mode: ECC mode
* @algo: ECC algorithm
@@ -815,7 +858,10 @@ struct nand_manufacturer_ops {
* @read_retries: [INTERN] the number of read retry modes supported
* @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
* @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
- * @setup_data_interface: [OPTIONAL] setup the data interface and timing
+ * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
+ * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
+ * means the configuration should not be applied but
+ * only checked.
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -826,9 +872,6 @@ struct nand_manufacturer_ops {
* structure which is shared among multiple independent
* devices.
* @priv: [OPTIONAL] pointer to private chip data
- * @errstat: [OPTIONAL] hardware specific function to perform
- * additional error status checks (determine if errors are
- * correctable).
* @manufacturer: [INTERN] Contains manufacturer information
*/
@@ -852,16 +895,13 @@ struct nand_chip {
int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
int (*erase)(struct mtd_info *mtd, int page);
int (*scan_bbt)(struct mtd_info *mtd);
- int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
- int status, int page);
int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
- int (*setup_data_interface)(struct mtd_info *mtd,
- const struct nand_data_interface *conf,
- bool check_only);
+ int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface *conf);
int chip_delay;
@@ -1244,6 +1284,15 @@ int nand_check_erased_ecc_chunk(void *data, int datalen,
void *extraoob, int extraooblen,
int threshold);
+int nand_check_ecc_caps(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
+
+int nand_match_ecc_req(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
+
+int nand_maximize_ecc(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
+
/* Default write_oob implementation */
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
@@ -1258,6 +1307,19 @@ int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int page);
+/* Stub used by drivers that do not support GET/SET FEATURES operations */
+int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
+ struct nand_chip *chip, int addr,
+ u8 *subfeature_param);
+
+/* Default read_page_raw implementation */
+int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page);
+
+/* Default write_page_raw implementation */
+int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page);
+
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
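A minimal sketch (driver name, step size and strength values are invented) of how a NAND controller driver might use the ECC capability helpers declared above:

#include <linux/kernel.h>
#include <linux/mtd/nand.h>

/* Hypothetical engine: 512-byte steps, strength 8 or 16, 13 bytes per 8 bits */
static int foo_calc_ecc_bytes(int step_size, int strength)
{
	return DIV_ROUND_UP(strength * 13, 8);
}
NAND_ECC_CAPS_SINGLE(foo_ecc_caps, foo_calc_ecc_bytes, 512, 8, 16);

static int foo_setup_ecc(struct nand_chip *chip, int oobavail)
{
	/* Honour an explicit ecc.size/ecc.strength request if one was given */
	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, &foo_ecc_caps, oobavail);

	/* Otherwise let the core pick the strongest setting that fits the OOB */
	return nand_maximize_ecc(chip, &foo_ecc_caps, oobavail);
}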
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 06df1e06b6e0..c4beb70dacbd 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -20,6 +20,12 @@
*
* For each partition, these fields are available:
* name: string that will be used to label the partition's MTD device.
+ * types: some partitions can be containers that use a specific format to
+ * describe embedded subpartitions / volumes. E.g. many home routers use a
+ * "firmware" partition that contains at least a kernel and a rootfs. In such
+ * a case an extra parser is needed to detect these dynamic partitions and
+ * report them to the MTD subsystem. If set, this property stores an array
+ * of parser names to use when looking for subpartitions.
* size: the partition size; if defined as MTDPART_SIZ_FULL, the partition
* will extend to the end of the master MTD device.
* offset: absolute starting position within the master MTD device; if
@@ -38,6 +44,7 @@
struct mtd_partition {
const char *name; /* identifier string */
+ const char *const *types; /* names of parsers to use if any */
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
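For illustration only (parser name and sizes are made up), a board could mark its "firmware" partition as a container by pointing the new types field at a NULL-terminated list of parser names:

#include <linux/mtd/partitions.h>
#include <linux/sizes.h>

static const char * const foo_fw_parsers[] = { "trx", NULL };

static const struct mtd_partition foo_parts[] = {
	{
		.name	= "bootloader",
		.offset	= 0,
		.size	= SZ_256K,
	}, {
		.name	= "firmware",		/* container: kernel + rootfs */
		.offset	= SZ_256K,
		.size	= MTDPART_SIZ_FULL,
		.types	= foo_fw_parsers,	/* triggers subpartition parsing */
	},
};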
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index f2a718030476..55faa2f07cca 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -73,6 +73,15 @@
#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */
#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */
+/* Double Transfer Rate opcodes - defined in JEDEC JESD216B. */
+#define SPINOR_OP_READ_1_1_1_DTR 0x0d
+#define SPINOR_OP_READ_1_2_2_DTR 0xbd
+#define SPINOR_OP_READ_1_4_4_DTR 0xed
+
+#define SPINOR_OP_READ_1_1_1_DTR_4B 0x0e
+#define SPINOR_OP_READ_1_2_2_DTR_4B 0xbe
+#define SPINOR_OP_READ_1_4_4_DTR_4B 0xee
+
/* Used for SST flashes only. */
#define SPINOR_OP_BP 0x02 /* Byte program */
#define SPINOR_OP_WRDI 0x04 /* Write disable */
@@ -119,13 +128,81 @@
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
-enum read_mode {
- SPI_NOR_NORMAL = 0,
- SPI_NOR_FAST,
- SPI_NOR_DUAL,
- SPI_NOR_QUAD,
+/* Supported SPI protocols */
+#define SNOR_PROTO_INST_MASK GENMASK(23, 16)
+#define SNOR_PROTO_INST_SHIFT 16
+#define SNOR_PROTO_INST(_nbits) \
+ ((((unsigned long)(_nbits)) << SNOR_PROTO_INST_SHIFT) & \
+ SNOR_PROTO_INST_MASK)
+
+#define SNOR_PROTO_ADDR_MASK GENMASK(15, 8)
+#define SNOR_PROTO_ADDR_SHIFT 8
+#define SNOR_PROTO_ADDR(_nbits) \
+ ((((unsigned long)(_nbits)) << SNOR_PROTO_ADDR_SHIFT) & \
+ SNOR_PROTO_ADDR_MASK)
+
+#define SNOR_PROTO_DATA_MASK GENMASK(7, 0)
+#define SNOR_PROTO_DATA_SHIFT 0
+#define SNOR_PROTO_DATA(_nbits) \
+ ((((unsigned long)(_nbits)) << SNOR_PROTO_DATA_SHIFT) & \
+ SNOR_PROTO_DATA_MASK)
+
+#define SNOR_PROTO_IS_DTR BIT(24) /* Double Transfer Rate */
+
+#define SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits) \
+ (SNOR_PROTO_INST(_inst_nbits) | \
+ SNOR_PROTO_ADDR(_addr_nbits) | \
+ SNOR_PROTO_DATA(_data_nbits))
+#define SNOR_PROTO_DTR(_inst_nbits, _addr_nbits, _data_nbits) \
+ (SNOR_PROTO_IS_DTR | \
+ SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits))
+
+enum spi_nor_protocol {
+ SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1),
+ SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2),
+ SNOR_PROTO_1_1_4 = SNOR_PROTO_STR(1, 1, 4),
+ SNOR_PROTO_1_1_8 = SNOR_PROTO_STR(1, 1, 8),
+ SNOR_PROTO_1_2_2 = SNOR_PROTO_STR(1, 2, 2),
+ SNOR_PROTO_1_4_4 = SNOR_PROTO_STR(1, 4, 4),
+ SNOR_PROTO_1_8_8 = SNOR_PROTO_STR(1, 8, 8),
+ SNOR_PROTO_2_2_2 = SNOR_PROTO_STR(2, 2, 2),
+ SNOR_PROTO_4_4_4 = SNOR_PROTO_STR(4, 4, 4),
+ SNOR_PROTO_8_8_8 = SNOR_PROTO_STR(8, 8, 8),
+
+ SNOR_PROTO_1_1_1_DTR = SNOR_PROTO_DTR(1, 1, 1),
+ SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2),
+ SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4),
+ SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8),
};
+static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto)
+{
+ return !!(proto & SNOR_PROTO_IS_DTR);
+}
+
+static inline u8 spi_nor_get_protocol_inst_nbits(enum spi_nor_protocol proto)
+{
+ return ((unsigned long)(proto & SNOR_PROTO_INST_MASK)) >>
+ SNOR_PROTO_INST_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_addr_nbits(enum spi_nor_protocol proto)
+{
+ return ((unsigned long)(proto & SNOR_PROTO_ADDR_MASK)) >>
+ SNOR_PROTO_ADDR_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_data_nbits(enum spi_nor_protocol proto)
+{
+ return ((unsigned long)(proto & SNOR_PROTO_DATA_MASK)) >>
+ SNOR_PROTO_DATA_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
+{
+ return spi_nor_get_protocol_data_nbits(proto);
+}
+
#define SPI_NOR_MAX_CMD_SIZE 8
enum spi_nor_ops {
SPI_NOR_OPS_READ = 0,
@@ -154,9 +231,11 @@ enum spi_nor_option_flags {
* @read_opcode: the read opcode
* @read_dummy: the dummy needed by the read operation
* @program_opcode: the program opcode
- * @flash_read: the mode of the read
* @sst_write_second: used by the SST write operation
* @flags: flag options for the current SPI-NOR (SNOR_F_*)
+ * @read_proto: the SPI protocol for read operations
+ * @write_proto: the SPI protocol for write operations
+ * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
* @cmd_buf: used by the write_reg
* @prepare: [OPTIONAL] do some preparations for the
* read/write/erase/lock/unlock operations
@@ -185,7 +264,9 @@ struct spi_nor {
u8 read_opcode;
u8 read_dummy;
u8 program_opcode;
- enum read_mode flash_read;
+ enum spi_nor_protocol read_proto;
+ enum spi_nor_protocol write_proto;
+ enum spi_nor_protocol reg_proto;
bool sst_write_second;
u32 flags;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
@@ -220,10 +301,71 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
}
/**
+ * struct spi_nor_hwcaps - Structure for describing the hardware capabilities
+ * supported by the SPI controller (bus master).
+ * @mask: the bitmask listing all the supported hw capabilities
+ */
+struct spi_nor_hwcaps {
+ u32 mask;
+};
+
+/*
+ * (Fast) Read capabilities.
+ * MUST be ordered by priority: the higher the bit position, the higher the priority.
+ * For performance reasons, it is relevant to use Octo SPI protocols first,
+ * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
+ * (Slow) Read.
+ */
+#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0)
+#define SNOR_HWCAPS_READ BIT(0)
+#define SNOR_HWCAPS_READ_FAST BIT(1)
+#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2)
+
+#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3)
+#define SNOR_HWCAPS_READ_1_1_2 BIT(3)
+#define SNOR_HWCAPS_READ_1_2_2 BIT(4)
+#define SNOR_HWCAPS_READ_2_2_2 BIT(5)
+#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6)
+
+#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7)
+#define SNOR_HWCAPS_READ_1_1_4 BIT(7)
+#define SNOR_HWCAPS_READ_1_4_4 BIT(8)
+#define SNOR_HWCAPS_READ_4_4_4 BIT(9)
+#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10)
+
+#define SNOR_HWCPAS_READ_OCTO GENMASK(14, 11)
+#define SNOR_HWCAPS_READ_1_1_8 BIT(11)
+#define SNOR_HWCAPS_READ_1_8_8 BIT(12)
+#define SNOR_HWCAPS_READ_8_8_8 BIT(13)
+#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14)
+
+/*
+ * Page Program capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the
+ * legacy SPI 1-1-1 protocol.
+ * Note that Dual Page Programs are not supported because there is no existing
+ * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
+ * implements such commands.
+ */
+#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16)
+#define SNOR_HWCAPS_PP BIT(16)
+
+#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
+#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
+#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
+#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
+
+#define SNOR_HWCAPS_PP_OCTO GENMASK(22, 20)
+#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
+#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
+#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+
+/**
* spi_nor_scan() - scan the SPI NOR
* @nor: the spi_nor structure
* @name: the chip type name
- * @mode: the read mode supported by the driver
+ * @hwcaps: the hardware capabilities supported by the controller driver
*
 * The drivers can use this function to scan the SPI NOR.
* In the scanning, it will try to get all the necessary information to
@@ -233,6 +375,7 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
*
* Return: 0 for success, others for failure.
*/
-int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode);
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+ const struct spi_nor_hwcaps *hwcaps);
#endif
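A hedged sketch of what a SPI controller driver might do after this change (the capability selection below is arbitrary): instead of an enum read_mode it now hands spi_nor_scan() a mask of everything the bus master can do, and may query the negotiated protocols afterwards.

#include <linux/mtd/spi-nor.h>

static int foo_nor_setup(struct spi_nor *nor)
{
	const struct spi_nor_hwcaps hwcaps = {
		.mask = SNOR_HWCAPS_READ |
			SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_READ_1_1_4 |
			SNOR_HWCAPS_PP,
	};
	int ret;

	ret = spi_nor_scan(nor, NULL, &hwcaps);
	if (ret)
		return ret;

	/* e.g. 4 when the core selected a 1-1-4 read protocol */
	pr_debug("read bus width: %u\n",
		 spi_nor_get_protocol_data_nbits(nor->read_proto));
	return 0;
}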
diff --git a/include/linux/net.h b/include/linux/net.h
index abcfa46a2bd9..dda2cc939a53 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -274,6 +274,8 @@ do { \
#define net_get_random_once(buf, nbytes) \
get_random_once((buf), (nbytes))
+#define net_get_random_once_wait(buf, nbytes) \
+ get_random_once_wait((buf), (nbytes))
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
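A small, hypothetical use of the new wrapper: in process context a networking component can seed a per-boot secret only after the RNG has been initialized.

static u32 foo_hash_secret[4];	/* hypothetical per-boot secret */

static void foo_init_secret(void)
{
	/* Fills the secret exactly once, waiting for the CRNG to be seeded */
	net_get_random_once_wait(foo_hash_secret, sizeof(foo_hash_secret));
}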
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index a4b97be30b28..22f081065d49 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -61,8 +61,6 @@ typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
struct nf_hook_ops {
- struct list_head list;
-
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
@@ -160,13 +158,6 @@ int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int n);
-int nf_register_hook(struct nf_hook_ops *reg);
-void nf_unregister_hook(struct nf_hook_ops *reg);
-int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
-void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
-int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
-void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
-
/* Functions to register get/setsockopt ranges (non-inclusive). You
need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
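With the global nf_register_hook() family gone, hooks are registered per network namespace; a rough sketch (hook body and priority are placeholders) of the replacement pattern:

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/net_namespace.h>

static unsigned int foo_hook(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	return NF_ACCEPT;
}

static const struct nf_hook_ops foo_nf_ops[] = {
	{
		.hook		= foo_hook,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_IN,
		.priority	= NF_IP_PRI_FILTER,
	},
};

static int __net_init foo_net_init(struct net *net)
{
	return nf_register_net_hooks(net, foo_nf_ops, ARRAY_SIZE(foo_nf_ops));
}

static void __net_exit foo_net_exit(struct net *net)
{
	nf_unregister_net_hooks(net, foo_nf_ops, ARRAY_SIZE(foo_nf_ops));
}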
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 1b1ca04820a3..47239c336688 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -479,6 +479,7 @@ enum {
NFSPROC4_CLNT_ACCESS,
NFSPROC4_CLNT_GETATTR,
NFSPROC4_CLNT_LOOKUP,
+ NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LOOKUP_ROOT,
NFSPROC4_CLNT_REMOVE,
NFSPROC4_CLNT_RENAME,
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index bb0eb2c9acca..e52cc55ac300 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -332,6 +332,7 @@ extern void nfs_zap_caches(struct inode *);
extern void nfs_invalidate_atime(struct inode *);
extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
struct nfs_fattr *, struct nfs4_label *);
+struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *);
extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index e418a1096662..74c44665e6d3 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -42,6 +42,7 @@ struct nfs_client {
#define NFS_CS_MIGRATION 2 /* - transparent state migr */
#define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */
#define NFS_CS_NO_RETRANS_TIMEOUT 4 /* - Disable retransmit timeouts */
+#define NFS_CS_TSM_POSSIBLE 5 /* - Maybe state migration */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -210,6 +211,7 @@ struct nfs_server {
unsigned long mig_status;
#define NFS_MIG_IN_TRANSITION (1)
#define NFS_MIG_FAILED (2)
+#define NFS_MIG_TSM_POSSIBLE (3)
void (*destroy)(struct nfs_server *);
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 247cc3d3498f..d67b67ae6c8b 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -33,6 +33,8 @@ enum {
PG_UPTODATE, /* page group sync bit in read path */
PG_WB_END, /* page group sync bit in write path */
PG_REMOVE, /* page group sync bit in write path */
+ PG_CONTENDED1, /* Is someone waiting for a lock? */
+ PG_CONTENDED2, /* Is someone waiting for a lock? */
};
struct nfs_inode;
@@ -93,8 +95,8 @@ struct nfs_pageio_descriptor {
const struct rpc_call_ops *pg_rpc_callops;
const struct nfs_pgio_completion_ops *pg_completion_ops;
struct pnfs_layout_segment *pg_lseg;
+ struct nfs_io_completion *pg_io_completion;
struct nfs_direct_req *pg_dreq;
- void *pg_layout_private;
unsigned int pg_bsize; /* default bsize for mirrors */
u32 pg_mirror_count;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index b28c83475ee8..ca3bcc4ed4e5 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -878,7 +878,7 @@ struct nfs3_readdirargs {
struct nfs_fh * fh;
__u64 cookie;
__be32 verf[2];
- int plus;
+ bool plus;
unsigned int count;
struct page ** pages;
};
@@ -909,7 +909,7 @@ struct nfs3_linkres {
struct nfs3_readdirres {
struct nfs_fattr * dir_attr;
__be32 * verf;
- int plus;
+ bool plus;
};
struct nfs3_getaclres {
@@ -1012,7 +1012,6 @@ struct nfs4_link_res {
struct nfs_fattr * dir_attr;
};
-
struct nfs4_lookup_arg {
struct nfs4_sequence_args seq_args;
const struct nfs_fh * dir_fh;
@@ -1028,6 +1027,20 @@ struct nfs4_lookup_res {
struct nfs4_label *label;
};
+struct nfs4_lookupp_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *fh;
+ const u32 *bitmask;
+};
+
+struct nfs4_lookupp_res {
+ struct nfs4_sequence_res seq_res;
+ const struct nfs_server *server;
+ struct nfs_fattr *fattr;
+ struct nfs_fh *fh;
+ struct nfs4_label *label;
+};
+
struct nfs4_lookup_root_arg {
struct nfs4_sequence_args seq_args;
const u32 * bitmask;
@@ -1053,7 +1066,7 @@ struct nfs4_readdir_arg {
struct page ** pages; /* zero-copy data */
unsigned int pgbase; /* zero-copy data */
const u32 * bitmask;
- int plus;
+ bool plus;
};
struct nfs4_readdir_res {
@@ -1422,6 +1435,7 @@ enum {
NFS_IOHDR_STAT,
};
+struct nfs_io_completion;
struct nfs_pgio_header {
struct inode *inode;
struct rpc_cred *cred;
@@ -1435,8 +1449,8 @@ struct nfs_pgio_header {
void (*release) (struct nfs_pgio_header *hdr);
const struct nfs_pgio_completion_ops *completion_ops;
const struct nfs_rw_ops *rw_ops;
+ struct nfs_io_completion *io_completion;
struct nfs_direct_req *dreq;
- void *layout_private;
spinlock_t lock;
/* fields protected by lock */
int pnfs_error;
@@ -1533,6 +1547,7 @@ struct nfs_renamedata {
struct nfs_fattr new_fattr;
void (*complete)(struct rpc_task *, struct nfs_renamedata *);
long timeout;
+ bool cancelled;
};
struct nfs_access_entry;
@@ -1567,6 +1582,8 @@ struct nfs_rpc_ops {
int (*lookup) (struct inode *, const struct qstr *,
struct nfs_fh *, struct nfs_fattr *,
struct nfs4_label *);
+ int (*lookupp) (struct inode *, struct nfs_fh *,
+ struct nfs_fattr *, struct nfs4_label *);
int (*access) (struct inode *, struct nfs_access_entry *);
int (*readlink)(struct inode *, struct page *, unsigned int,
unsigned int);
@@ -1585,7 +1602,7 @@ struct nfs_rpc_ops {
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
int (*readdir) (struct dentry *, struct rpc_cred *,
- u64, struct page **, unsigned int, int);
+ u64, struct page **, unsigned int, bool);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
dev_t);
int (*statfs) (struct nfs_server *, struct nfs_fh *,
@@ -1595,7 +1612,7 @@ struct nfs_rpc_ops {
int (*pathconf) (struct nfs_server *, struct nfs_fh *,
struct nfs_pathconf *);
int (*set_capabilities)(struct nfs_server *, struct nfs_fh *);
- int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int);
+ int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, bool);
int (*pgio_rpc_prepare)(struct rpc_task *,
struct nfs_pgio_header *);
void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *);
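As a hedged illustration of the new ->lookupp operation (the wrapper name is invented), a caller asking for the parent of an inode could go through the per-version rpc_ops table:

#include <linux/errno.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

static int foo_lookup_parent(struct inode *inode, struct nfs_fh *fh,
			     struct nfs_fattr *fattr, struct nfs4_label *label)
{
	const struct nfs_rpc_ops *ops = NFS_PROTO(inode);

	/* The op may be left NULL by NFS versions without LOOKUPP support */
	if (!ops->lookupp)
		return -EOPNOTSUPP;

	return ops->lookupp(inode, fh, fattr, label);
}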
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aa3cd0878270..8aa01fd859fb 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -6,18 +6,26 @@
#include <linux/sched.h>
#include <asm/irq.h>
+#if defined(CONFIG_HAVE_NMI_WATCHDOG)
+#include <asm/nmi.h>
+#endif
#ifdef CONFIG_LOCKUP_DETECTOR
+void lockup_detector_init(void);
+#else
+static inline void lockup_detector_init(void)
+{
+}
+#endif
+
+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
-extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
-extern unsigned int hardlockup_panic;
-void lockup_detector_init(void);
+extern int soft_watchdog_enabled;
+extern atomic_t watchdog_park_in_progress;
#else
static inline void touch_softlockup_watchdog_sched(void)
{
@@ -31,9 +39,6 @@ static inline void touch_softlockup_watchdog_sync(void)
static inline void touch_all_softlockup_watchdogs(void)
{
}
-static inline void lockup_detector_init(void)
-{
-}
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
@@ -61,6 +66,21 @@ static inline void reset_hung_task_detector(void)
#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
+#if defined(CONFIG_HARDLOCKUP_DETECTOR)
+extern void hardlockup_detector_disable(void);
+extern unsigned int hardlockup_panic;
+#else
+static inline void hardlockup_detector_disable(void) {}
+#endif
+
+#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
+extern void arch_touch_nmi_watchdog(void);
+#else
+#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline void arch_touch_nmi_watchdog(void) {}
+#endif
+#endif
+
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
*
@@ -68,21 +88,11 @@ static inline void reset_hung_task_detector(void)
* may be used to reset the timeout - for code which intentionally
* disables interrupts for a long time. This call is stateless.
*/
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
-#include <asm/nmi.h>
-extern void touch_nmi_watchdog(void);
-#else
static inline void touch_nmi_watchdog(void)
{
+ arch_touch_nmi_watchdog();
touch_softlockup_watchdog();
}
-#endif
-
-#if defined(CONFIG_HARDLOCKUP_DETECTOR)
-extern void hardlockup_detector_disable(void);
-#else
-static inline void hardlockup_detector_disable(void) {}
-#endif
/*
* Create trigger_all_cpu_backtrace() out of the arch-provided
@@ -139,15 +149,18 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
}
#endif
-#ifdef CONFIG_LOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
+#endif
+
+#ifdef CONFIG_LOCKUP_DETECTOR
extern int nmi_watchdog_enabled;
-extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
+extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
-extern atomic_t watchdog_park_in_progress;
+extern int __read_mostly watchdog_suspended;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
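Nothing changes for ordinary callers of touch_nmi_watchdog(); a typical (illustrative) use in a long busy-wait with interrupts disabled still looks like this, now resolving to arch_touch_nmi_watchdog() plus the soft-lockup touch:

#include <linux/io.h>
#include <linux/nmi.h>

static void foo_wait_for_ready(void __iomem *status_reg)
{
	/* May spin for seconds with IRQs off; keep both lockup detectors quiet */
	while (!(readl(status_reg) & BIT(0))) {
		touch_nmi_watchdog();
		cpu_relax();
	}
}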
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index de87ceac110e..609e232c00da 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -5,6 +5,7 @@
* GPL LICENSE SUMMARY
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,6 +19,7 @@
* BSD LICENSE
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -106,6 +108,7 @@ static inline char *ntb_topo_string(enum ntb_topo topo)
* @NTB_SPEED_GEN1: Link is trained to gen1 speed.
* @NTB_SPEED_GEN2: Link is trained to gen2 speed.
* @NTB_SPEED_GEN3: Link is trained to gen3 speed.
+ * @NTB_SPEED_GEN4: Link is trained to gen4 speed.
*/
enum ntb_speed {
NTB_SPEED_AUTO = -1,
@@ -113,6 +116,7 @@ enum ntb_speed {
NTB_SPEED_GEN1 = 1,
NTB_SPEED_GEN2 = 2,
NTB_SPEED_GEN3 = 3,
+ NTB_SPEED_GEN4 = 4
};
/**
@@ -140,6 +144,20 @@ enum ntb_width {
};
/**
+ * enum ntb_default_port - NTB default port number
+ * @NTB_PORT_PRI_USD: Default port of the NTB_TOPO_PRI/NTB_TOPO_B2B_USD
+ * topologies
+ * @NTB_PORT_SEC_DSD: Default port of the NTB_TOPO_SEC/NTB_TOPO_B2B_DSD
+ * topologies
+ */
+enum ntb_default_port {
+ NTB_PORT_PRI_USD,
+ NTB_PORT_SEC_DSD
+};
+#define NTB_DEF_PEER_CNT (1)
+#define NTB_DEF_PEER_IDX (0)
+
+/**
* struct ntb_client_ops - ntb client operations
* @probe: Notify client of a new device.
* @remove: Notify client to remove a device.
@@ -162,10 +180,12 @@ static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops)
* struct ntb_ctx_ops - ntb driver context operations
* @link_event: See ntb_link_event().
* @db_event: See ntb_db_event().
+ * @msg_event: See ntb_msg_event().
*/
struct ntb_ctx_ops {
void (*link_event)(void *ctx);
void (*db_event)(void *ctx, int db_vector);
+ void (*msg_event)(void *ctx);
};
static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
@@ -174,18 +194,27 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
return
/* ops->link_event && */
/* ops->db_event && */
+ /* ops->msg_event && */
1;
}
/**
* struct ntb_ctx_ops - ntb device operations
- * @mw_count: See ntb_mw_count().
- * @mw_get_range: See ntb_mw_get_range().
- * @mw_set_trans: See ntb_mw_set_trans().
- * @mw_clear_trans: See ntb_mw_clear_trans().
+ * @port_number: See ntb_port_number().
+ * @peer_port_count: See ntb_peer_port_count().
+ * @peer_port_number: See ntb_peer_port_number().
+ * @peer_port_idx: See ntb_peer_port_idx().
* @link_is_up: See ntb_link_is_up().
* @link_enable: See ntb_link_enable().
* @link_disable: See ntb_link_disable().
+ * @mw_count: See ntb_mw_count().
+ * @mw_get_align: See ntb_mw_get_align().
+ * @mw_set_trans: See ntb_mw_set_trans().
+ * @mw_clear_trans: See ntb_mw_clear_trans().
+ * @peer_mw_count: See ntb_peer_mw_count().
+ * @peer_mw_get_addr: See ntb_peer_mw_get_addr().
+ * @peer_mw_set_trans: See ntb_peer_mw_set_trans().
+ * @peer_mw_clear_trans: See ntb_peer_mw_clear_trans().
* @db_is_unsafe: See ntb_db_is_unsafe().
* @db_valid_mask: See ntb_db_valid_mask().
* @db_vector_count: See ntb_db_vector_count().
@@ -210,22 +239,43 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
* @peer_spad_addr: See ntb_peer_spad_addr().
* @peer_spad_read: See ntb_peer_spad_read().
* @peer_spad_write: See ntb_peer_spad_write().
+ * @msg_count: See ntb_msg_count().
+ * @msg_inbits: See ntb_msg_inbits().
+ * @msg_outbits: See ntb_msg_outbits().
+ * @msg_read_sts: See ntb_msg_read_sts().
+ * @msg_clear_sts: See ntb_msg_clear_sts().
+ * @msg_set_mask: See ntb_msg_set_mask().
+ * @msg_clear_mask: See ntb_msg_clear_mask().
+ * @msg_read: See ntb_msg_read().
+ * @msg_write: See ntb_msg_write().
*/
struct ntb_dev_ops {
- int (*mw_count)(struct ntb_dev *ntb);
- int (*mw_get_range)(struct ntb_dev *ntb, int idx,
- phys_addr_t *base, resource_size_t *size,
- resource_size_t *align, resource_size_t *align_size);
- int (*mw_set_trans)(struct ntb_dev *ntb, int idx,
- dma_addr_t addr, resource_size_t size);
- int (*mw_clear_trans)(struct ntb_dev *ntb, int idx);
+ int (*port_number)(struct ntb_dev *ntb);
+ int (*peer_port_count)(struct ntb_dev *ntb);
+ int (*peer_port_number)(struct ntb_dev *ntb, int pidx);
+ int (*peer_port_idx)(struct ntb_dev *ntb, int port);
- int (*link_is_up)(struct ntb_dev *ntb,
+ u64 (*link_is_up)(struct ntb_dev *ntb,
enum ntb_speed *speed, enum ntb_width *width);
int (*link_enable)(struct ntb_dev *ntb,
enum ntb_speed max_speed, enum ntb_width max_width);
int (*link_disable)(struct ntb_dev *ntb);
+ int (*mw_count)(struct ntb_dev *ntb, int pidx);
+ int (*mw_get_align)(struct ntb_dev *ntb, int pidx, int widx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max);
+ int (*mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx,
+ dma_addr_t addr, resource_size_t size);
+ int (*mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx);
+ int (*peer_mw_count)(struct ntb_dev *ntb);
+ int (*peer_mw_get_addr)(struct ntb_dev *ntb, int widx,
+ phys_addr_t *base, resource_size_t *size);
+ int (*peer_mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx,
+ u64 addr, resource_size_t size);
+ int (*peer_mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx);
+
int (*db_is_unsafe)(struct ntb_dev *ntb);
u64 (*db_valid_mask)(struct ntb_dev *ntb);
int (*db_vector_count)(struct ntb_dev *ntb);
@@ -252,32 +302,55 @@ struct ntb_dev_ops {
int (*spad_is_unsafe)(struct ntb_dev *ntb);
int (*spad_count)(struct ntb_dev *ntb);
- u32 (*spad_read)(struct ntb_dev *ntb, int idx);
- int (*spad_write)(struct ntb_dev *ntb, int idx, u32 val);
+ u32 (*spad_read)(struct ntb_dev *ntb, int sidx);
+ int (*spad_write)(struct ntb_dev *ntb, int sidx, u32 val);
- int (*peer_spad_addr)(struct ntb_dev *ntb, int idx,
+ int (*peer_spad_addr)(struct ntb_dev *ntb, int pidx, int sidx,
phys_addr_t *spad_addr);
- u32 (*peer_spad_read)(struct ntb_dev *ntb, int idx);
- int (*peer_spad_write)(struct ntb_dev *ntb, int idx, u32 val);
+ u32 (*peer_spad_read)(struct ntb_dev *ntb, int pidx, int sidx);
+ int (*peer_spad_write)(struct ntb_dev *ntb, int pidx, int sidx,
+ u32 val);
+
+ int (*msg_count)(struct ntb_dev *ntb);
+ u64 (*msg_inbits)(struct ntb_dev *ntb);
+ u64 (*msg_outbits)(struct ntb_dev *ntb);
+ u64 (*msg_read_sts)(struct ntb_dev *ntb);
+ int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits);
+ int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits);
+ int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits);
+ int (*msg_read)(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg);
+ int (*msg_write)(struct ntb_dev *ntb, int midx, int pidx, u32 msg);
};
static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
{
/* commented callbacks are not required: */
return
- ops->mw_count &&
- ops->mw_get_range &&
- ops->mw_set_trans &&
- /* ops->mw_clear_trans && */
+ /* Port operations are required for multiport devices */
+ !ops->peer_port_count == !ops->port_number &&
+ !ops->peer_port_number == !ops->port_number &&
+ !ops->peer_port_idx == !ops->port_number &&
+
+ /* Link operations are required */
ops->link_is_up &&
ops->link_enable &&
ops->link_disable &&
+
+ /* One or both MW interfaces should be developed */
+ ops->mw_count &&
+ ops->mw_get_align &&
+ (ops->mw_set_trans ||
+ ops->peer_mw_set_trans) &&
+ /* ops->mw_clear_trans && */
+ ops->peer_mw_count &&
+ ops->peer_mw_get_addr &&
+ /* ops->peer_mw_clear_trans && */
+
+ /* Doorbell operations are mostly required */
/* ops->db_is_unsafe && */
ops->db_valid_mask &&
-
/* both set, or both unset */
- (!ops->db_vector_count == !ops->db_vector_mask) &&
-
+ (!ops->db_vector_count == !ops->db_vector_mask) &&
ops->db_read &&
/* ops->db_set && */
ops->db_clear &&
@@ -291,13 +364,24 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
/* ops->peer_db_read_mask && */
/* ops->peer_db_set_mask && */
/* ops->peer_db_clear_mask && */
- /* ops->spad_is_unsafe && */
- ops->spad_count &&
- ops->spad_read &&
- ops->spad_write &&
- /* ops->peer_spad_addr && */
- /* ops->peer_spad_read && */
- ops->peer_spad_write &&
+
+ /* Scratchpad interface is optional */
+ /* !ops->spad_is_unsafe == !ops->spad_count && */
+ !ops->spad_read == !ops->spad_count &&
+ !ops->spad_write == !ops->spad_count &&
+ /* !ops->peer_spad_addr == !ops->spad_count && */
+ /* !ops->peer_spad_read == !ops->spad_count && */
+ !ops->peer_spad_write == !ops->spad_count &&
+
+ /* Messaging interface is optional */
+ !ops->msg_inbits == !ops->msg_count &&
+ !ops->msg_outbits == !ops->msg_count &&
+ !ops->msg_read_sts == !ops->msg_count &&
+ !ops->msg_clear_sts == !ops->msg_count &&
+ /* !ops->msg_set_mask == !ops->msg_count && */
+ /* !ops->msg_clear_mask == !ops->msg_count && */
+ !ops->msg_read == !ops->msg_count &&
+ !ops->msg_write == !ops->msg_count &&
1;
}
@@ -310,13 +394,12 @@ struct ntb_client {
struct device_driver drv;
const struct ntb_client_ops ops;
};
-
#define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv)
/**
* struct ntb_device - ntb device
* @dev: Linux device object.
- * @pdev: Pci device entry of the ntb.
+ * @pdev: PCI device entry of the ntb.
* @topo: Detected topology of the ntb.
* @ops: See &ntb_dev_ops.
* @ctx: See &ntb_ctx_ops.
@@ -337,7 +420,6 @@ struct ntb_dev {
/* block unregister until device is fully released */
struct completion released;
};
-
#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)
/**
@@ -434,86 +516,152 @@ void ntb_link_event(struct ntb_dev *ntb);
* multiple interrupt vectors for doorbells, the vector number indicates which
* vector received the interrupt. The vector number is relative to the first
* vector used for doorbells, starting at zero, and must be less than
- ** ntb_db_vector_count(). The driver may call ntb_db_read() to check which
+ * ntb_db_vector_count(). The driver may call ntb_db_read() to check which
* doorbell bits need service, and ntb_db_vector_mask() to determine which of
* those bits are associated with the vector number.
*/
void ntb_db_event(struct ntb_dev *ntb, int vector);
/**
- * ntb_mw_count() - get the number of memory windows
+ * ntb_msg_event() - notify driver context of a message event
* @ntb: NTB device context.
*
- * Hardware and topology may support a different number of memory windows.
+ * Notify the driver context of a message event. If hardware supports
+ * message registers, this event indicates that a new message arrived in
+ * some incoming message register or that the last sent message couldn't be
+ * delivered.
+ * The events can be masked/unmasked by the methods ntb_msg_set_mask() and
+ * ntb_msg_clear_mask().
+ */
+void ntb_msg_event(struct ntb_dev *ntb);
+
+/**
+ * ntb_default_port_number() - get the default local port number
+ * @ntb: NTB device context.
*
- * Return: the number of memory windows.
+ * If the hardware driver doesn't specify the port_number() callback method,
+ * the NTB device is assumed to have just two ports. In that case this method
+ * returns the default local port number according to the topology.
+ *
+ * NOTE Don't call this method directly. The ntb_port_number() function should
+ * be used instead.
+ *
+ * Return: the default local port number
+ */
+int ntb_default_port_number(struct ntb_dev *ntb);
+
+/**
+ * ntb_default_port_count() - get the default number of peer device ports
+ * @ntb: NTB device context.
+ *
+ * By default the hardware driver supports just one peer device.
+ *
+ * NOTE Don't call this method directly. The ntb_peer_port_count() function
+ * should be used instead.
+ *
+ * Return: the default number of peer ports
+ */
+int ntb_default_peer_port_count(struct ntb_dev *ntb);
+
+/**
+ * ntb_default_peer_port_number() - get the default peer port by given index
+ * @ntb: NTB device context.
+ * @idx: Peer port index (should not differ from zero).
+ *
+ * By default the hardware driver supports just one peer device, so this
+ * method shall return the corresponding value from enum ntb_default_port.
+ *
+ * NOTE Don't call this method directly. The ntb_peer_port_number() function
+ * should be used instead.
+ *
+ * Return: the peer device port or negative value indicating an error
+ */
+int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx);
+
+/**
+ * ntb_default_peer_port_idx() - get the default peer device port index by
+ * given port number
+ * @ntb: NTB device context.
+ * @port: Peer port number (should be one of enum ntb_default_port).
+ *
+ * By default the hardware driver supports just one peer device, so as long
+ * as the specified port argument indicates a peer port from
+ * enum ntb_default_port, the return value shall be zero.
+ *
+ * NOTE Don't call this method directly. The ntb_peer_port_idx() function
+ * should be used instead.
+ *
+ * Return: the peer port index or negative value indicating an error
+ */
+int ntb_default_peer_port_idx(struct ntb_dev *ntb, int port);
+
+/**
+ * ntb_port_number() - get the local port number
+ * @ntb: NTB device context.
+ *
+ * Hardware must support at least a simple two-port ntb connection.
+ *
+ * Return: the local port number
*/
-static inline int ntb_mw_count(struct ntb_dev *ntb)
+static inline int ntb_port_number(struct ntb_dev *ntb)
{
- return ntb->ops->mw_count(ntb);
+ if (!ntb->ops->port_number)
+ return ntb_default_port_number(ntb);
+
+ return ntb->ops->port_number(ntb);
}
/**
- * ntb_mw_get_range() - get the range of a memory window
+ * ntb_peer_port_count() - get the number of peer device ports
* @ntb: NTB device context.
- * @idx: Memory window number.
- * @base: OUT - the base address for mapping the memory window
- * @size: OUT - the size for mapping the memory window
- * @align: OUT - the base alignment for translating the memory window
- * @align_size: OUT - the size alignment for translating the memory window
*
- * Get the range of a memory window. NULL may be given for any output
- * parameter if the value is not needed. The base and size may be used for
- * mapping the memory window, to access the peer memory. The alignment and
- * size may be used for translating the memory window, for the peer to access
- * memory on the local system.
+ * Hardware may support access to the memory of several remote domains
+ * over multi-port NTB devices. This method returns the number of peers the
+ * local device can share memory with.
*
- * Return: Zero on success, otherwise an error number.
+ * Return: the number of peer ports
*/
-static inline int ntb_mw_get_range(struct ntb_dev *ntb, int idx,
- phys_addr_t *base, resource_size_t *size,
- resource_size_t *align, resource_size_t *align_size)
+static inline int ntb_peer_port_count(struct ntb_dev *ntb)
{
- return ntb->ops->mw_get_range(ntb, idx, base, size,
- align, align_size);
+ if (!ntb->ops->peer_port_count)
+ return ntb_default_peer_port_count(ntb);
+
+ return ntb->ops->peer_port_count(ntb);
}
/**
- * ntb_mw_set_trans() - set the translation of a memory window
+ * ntb_peer_port_number() - get the peer port by given index
* @ntb: NTB device context.
- * @idx: Memory window number.
- * @addr: The dma address local memory to expose to the peer.
- * @size: The size of the local memory to expose to the peer.
+ * @pidx: Peer port index.
*
- * Set the translation of a memory window. The peer may access local memory
- * through the window starting at the address, up to the size. The address
- * must be aligned to the alignment specified by ntb_mw_get_range(). The size
- * must be aligned to the size alignment specified by ntb_mw_get_range().
+ * Peer ports are continuously enumerated by the NTB API logic, so this method
+ * lets the caller retrieve the real port number from its index.
*
- * Return: Zero on success, otherwise an error number.
+ * Return: the peer device port or negative value indicating an error
*/
-static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
- dma_addr_t addr, resource_size_t size)
+static inline int ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
{
- return ntb->ops->mw_set_trans(ntb, idx, addr, size);
+ if (!ntb->ops->peer_port_number)
+ return ntb_default_peer_port_number(ntb, pidx);
+
+ return ntb->ops->peer_port_number(ntb, pidx);
}
/**
- * ntb_mw_clear_trans() - clear the translation of a memory window
+ * ntb_peer_port_idx() - get the peer device port index by given port number
* @ntb: NTB device context.
- * @idx: Memory window number.
+ * @port: Peer port number.
*
- * Clear the translation of a memory window. The peer may no longer access
- * local memory through the window.
+ * Inverse operation of ntb_peer_port_number(): retrieve the port index from
+ * the specified port number.
*
- * Return: Zero on success, otherwise an error number.
+ * Return: the peer port index or negative value indicating an error
*/
-static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
+static inline int ntb_peer_port_idx(struct ntb_dev *ntb, int port)
{
- if (!ntb->ops->mw_clear_trans)
- return ntb->ops->mw_set_trans(ntb, idx, 0, 0);
+ if (!ntb->ops->peer_port_idx)
+ return ntb_default_peer_port_idx(ntb, port);
- return ntb->ops->mw_clear_trans(ntb, idx);
+ return ntb->ops->peer_port_idx(ntb, port);
}
/**
@@ -526,25 +674,26 @@ static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
* state once after every link event. It is safe to query the link state in
* the context of the link event callback.
*
- * Return: One if the link is up, zero if the link is down, otherwise a
- * negative value indicating the error number.
+ * Return: bitfield of indexed ports link state: a bit is set if the
+ * corresponding link is up and cleared if it is down.
*/
-static inline int ntb_link_is_up(struct ntb_dev *ntb,
+static inline u64 ntb_link_is_up(struct ntb_dev *ntb,
enum ntb_speed *speed, enum ntb_width *width)
{
return ntb->ops->link_is_up(ntb, speed, width);
}
/**
- * ntb_link_enable() - enable the link on the secondary side of the ntb
+ * ntb_link_enable() - enable the local port ntb connection
* @ntb: NTB device context.
* @max_speed: The maximum link speed expressed as PCIe generation number.
* @max_width: The maximum link width expressed as the number of PCIe lanes.
*
- * Enable the link on the secondary side of the ntb. This can only be done
- * from the primary side of the ntb in primary or b2b topology. The ntb device
- * should train the link to its maximum speed and width, or the requested speed
- * and width, whichever is smaller, if supported.
+ * Enable the NTB/PCIe link on the local or remote (for bridge-to-bridge
+ * topology) side of the bridge. If supported, the ntb device should train
+ * the link to its maximum speed and width, or the requested speed and width,
+ * whichever is smaller. Some hardware doesn't support PCIe link training, in
+ * which case the last two arguments are ignored.
*
* Return: Zero on success, otherwise an error number.
*/
@@ -556,14 +705,14 @@ static inline int ntb_link_enable(struct ntb_dev *ntb,
}
/**
- * ntb_link_disable() - disable the link on the secondary side of the ntb
+ * ntb_link_disable() - disable the local port ntb connection
* @ntb: NTB device context.
*
- * Disable the link on the secondary side of the ntb. This can only be
- * done from the primary side of the ntb in primary or b2b topology. The ntb
- * device should disable the link. Returning from this call must indicate that
- * a barrier has passed, though with no more writes may pass in either
- * direction across the link, except if this call returns an error number.
+ * Disable the link on the local or remote (for b2b topology) side of the ntb.
+ * The ntb device should disable the link. Returning from this call must
+ * indicate that a barrier has passed: no more writes may pass in either
+ * direction across the link, except if this call returns an error number.
*
* Return: Zero on success, otherwise an error number.
*/
@@ -573,6 +722,183 @@ static inline int ntb_link_disable(struct ntb_dev *ntb)
}
/**
+ * ntb_mw_count() - get the number of inbound memory windows, which could
+ * be created for a specified peer device
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ *
+ * Hardware and topology may support a different number of memory windows.
+ * Moreover different peer devices can support a different number of memory
+ * windows. Simply put, this method returns the number of possible inbound
+ * memory windows to share with the specified peer device.
+ *
+ * Return: the number of memory windows.
+ */
+static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ return ntb->ops->mw_count(ntb, pidx);
+}
+
+/**
+ * ntb_mw_get_align() - get the restriction parameters of inbound memory window
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ * @widx: Memory window index.
+ * @addr_align: OUT - the base alignment for translating the memory window
+ * @size_align: OUT - the size alignment for translating the memory window
+ * @size_max: OUT - the maximum size of the memory window
+ *
+ * Get the alignments of an inbound memory window with specified index.
+ * NULL may be given for any output parameter if the value is not needed.
+ * The alignment and size parameters may be used for allocation of proper
+ * shared memory.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
+ size_max);
+}
+
+/**
+ * ntb_mw_set_trans() - set the translation of an inbound memory window
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ * @widx: Memory window index.
+ * @addr: The dma address of local memory to expose to the peer.
+ * @size: The size of the local memory to expose to the peer.
+ *
+ * Set the translation of a memory window. The peer may access local memory
+ * through the window starting at the address, up to the size. The address
+ * and size must be aligned in compliance with restrictions of
+ * ntb_mw_get_align(). The region size should not exceed the size_max parameter
+ * of that method.
+ *
+ * This method may not be implemented due to the hardware specific memory
+ * windows interface.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+ dma_addr_t addr, resource_size_t size)
+{
+ if (!ntb->ops->mw_set_trans)
+ return 0;
+
+ return ntb->ops->mw_set_trans(ntb, pidx, widx, addr, size);
+}
+
+/**
+ * ntb_mw_clear_trans() - clear the translation address of an inbound memory
+ * window
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ * @widx: Memory window index.
+ *
+ * Clear the translation of an inbound memory window. The peer may no longer
+ * access local memory through the window.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int pidx, int widx)
+{
+ if (!ntb->ops->mw_clear_trans)
+ return ntb_mw_set_trans(ntb, pidx, widx, 0, 0);
+
+ return ntb->ops->mw_clear_trans(ntb, pidx, widx);
+}
+
+/**
+ * ntb_peer_mw_count() - get the number of outbound memory windows, which could
+ * be mapped to access a shared memory
+ * @ntb: NTB device context.
+ *
+ * Hardware and topology may support a different number of memory windows.
+ * This method returns the number of outbound memory windows supported by
+ * the local device.
+ *
+ * Return: the number of memory windows.
+ */
+static inline int ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+ return ntb->ops->peer_mw_count(ntb);
+}
+
+/**
+ * ntb_peer_mw_get_addr() - get map address of an outbound memory window
+ * @ntb: NTB device context.
+ * @widx: Memory window index (within ntb_peer_mw_count() return value).
+ * @base: OUT - the base address of mapping region.
+ * @size: OUT - the size of mapping region.
+ *
+ * Get base and size of memory region to map. NULL may be given for any output
+ * parameter if the value is not needed. The base and size may be used for
+ * mapping the memory window, to access the peer memory.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ return ntb->ops->peer_mw_get_addr(ntb, widx, base, size);
+}
+
+/**
+ * ntb_peer_mw_set_trans() - set a translation address of a memory window
+ * retrieved from a peer device
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device the translation address received from.
+ * @widx: Memory window index.
+ * @addr: The dma address of the shared memory to access.
+ * @size: The size of the shared memory to access.
+ *
+ * Set the translation of an outbound memory window. The local device may
+ * access shared memory allocated by the peer device that sent the address.
+ *
+ * This method may not be implemented due to the hardware specific memory
+ * windows interface, so a translation address can only be set on the side
+ * where shared memory (inbound memory windows) is allocated.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+ u64 addr, resource_size_t size)
+{
+ if (!ntb->ops->peer_mw_set_trans)
+ return 0;
+
+ return ntb->ops->peer_mw_set_trans(ntb, pidx, widx, addr, size);
+}
+
+/**
+ * ntb_peer_mw_clear_trans() - clear the translation address of an outbound
+ * memory window
+ * @ntb: NTB device context.
+ * @pidx: Port index of peer device.
+ * @widx: Memory window index.
+ *
+ * Clear the translation of an outbound memory window. The local device may no
+ * longer access a shared memory through the window.
+ *
+ * This method may not be implemented due to the hardware specific memory
+ * windows interface.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
+ int widx)
+{
+ if (!ntb->ops->peer_mw_clear_trans)
+ return ntb_peer_mw_set_trans(ntb, pidx, widx, 0, 0);
+
+ return ntb->ops->peer_mw_clear_trans(ntb, pidx, widx);
+}
+
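/*
 * Illustrative sketch only (peer/window indexes and sizes are arbitrary, and
 * <linux/dma-mapping.h> is assumed to be available): exporting one local DMA
 * buffer to the default peer with the per-peer memory window API above.
 */
static int foo_export_buffer(struct ntb_dev *ntb, struct device *dma_dev)
{
	resource_size_t addr_align, size_align, size_max, size;
	dma_addr_t dma_addr;
	void *buf;
	int rc;

	if (ntb_mw_count(ntb, NTB_DEF_PEER_IDX) < 1)
		return -ENODEV;

	rc = ntb_mw_get_align(ntb, NTB_DEF_PEER_IDX, 0,
			      &addr_align, &size_align, &size_max);
	if (rc)
		return rc;

	size = min_t(resource_size_t, round_up(PAGE_SIZE, size_align), size_max);
	buf = dma_alloc_coherent(dma_dev, size, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (!IS_ALIGNED(dma_addr, addr_align)) {
		dma_free_coherent(dma_dev, size, buf, dma_addr);
		return -ENOMEM;
	}

	/* The default peer can now reach @buf through its outbound window 0 */
	return ntb_mw_set_trans(ntb, NTB_DEF_PEER_IDX, 0, dma_addr, size);
}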
+/**
* ntb_db_is_unsafe() - check if it is safe to use hardware doorbell
* @ntb: NTB device context.
*
@@ -900,47 +1226,58 @@ static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
* @ntb: NTB device context.
*
* Hardware and topology may support a different number of scratchpads.
+ * It must be the same for all ports of the NTB device, though.
*
* Return: the number of scratchpads.
*/
static inline int ntb_spad_count(struct ntb_dev *ntb)
{
+ if (!ntb->ops->spad_count)
+ return 0;
+
return ntb->ops->spad_count(ntb);
}
/**
* ntb_spad_read() - read the local scratchpad register
* @ntb: NTB device context.
- * @idx: Scratchpad index.
+ * @sidx: Scratchpad index.
*
* Read the local scratchpad register, and return the value.
*
* Return: The value of the local scratchpad register.
*/
-static inline u32 ntb_spad_read(struct ntb_dev *ntb, int idx)
+static inline u32 ntb_spad_read(struct ntb_dev *ntb, int sidx)
{
- return ntb->ops->spad_read(ntb, idx);
+ if (!ntb->ops->spad_read)
+ return ~(u32)0;
+
+ return ntb->ops->spad_read(ntb, sidx);
}
/**
* ntb_spad_write() - write the local scratchpad register
* @ntb: NTB device context.
- * @idx: Scratchpad index.
+ * @sidx: Scratchpad index.
* @val: Scratchpad value.
*
* Write the value to the local scratchpad register.
*
* Return: Zero on success, otherwise an error number.
*/
-static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val)
{
- return ntb->ops->spad_write(ntb, idx, val);
+ if (!ntb->ops->spad_write)
+ return -EINVAL;
+
+ return ntb->ops->spad_write(ntb, sidx, val);
}
/**
* ntb_peer_spad_addr() - address of the peer scratchpad register
* @ntb: NTB device context.
- * @idx: Scratchpad index.
+ * @pidx: Port index of peer device.
+ * @sidx: Scratchpad index.
* @spad_addr: OUT - The address of the peer scratchpad register.
*
* Return the address of the peer doorbell register. This may be used, for
@@ -948,45 +1285,213 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
*
* Return: Zero on success, otherwise an error number.
*/
-static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
+static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
phys_addr_t *spad_addr)
{
if (!ntb->ops->peer_spad_addr)
return -EINVAL;
- return ntb->ops->peer_spad_addr(ntb, idx, spad_addr);
+ return ntb->ops->peer_spad_addr(ntb, pidx, sidx, spad_addr);
}
/**
* ntb_peer_spad_read() - read the peer scratchpad register
* @ntb: NTB device context.
- * @idx: Scratchpad index.
+ * @pidx: Port index of peer device.
+ * @sidx: Scratchpad index.
*
* Read the peer scratchpad register, and return the value.
*
* Return: The value of the local scratchpad register.
*/
-static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
if (!ntb->ops->peer_spad_read)
- return 0;
+ return ~(u32)0;
- return ntb->ops->peer_spad_read(ntb, idx);
+ return ntb->ops->peer_spad_read(ntb, pidx, sidx);
}
/**
* ntb_peer_spad_write() - write the peer scratchpad register
* @ntb: NTB device context.
- * @idx: Scratchpad index.
+ * @pidx: Port index of peer device.
+ * @sidx: Scratchpad index.
* @val: Scratchpad value.
*
* Write the value to the peer scratchpad register.
*
* Return: Zero on success, otherwise an error number.
*/
-static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
+ u32 val)
+{
+ if (!ntb->ops->peer_spad_write)
+ return -EINVAL;
+
+ return ntb->ops->peer_spad_write(ntb, pidx, sidx, val);
+}
+
+/**
+ * ntb_msg_count() - get the number of message registers
+ * @ntb: NTB device context.
+ *
+ * Hardware may support a different number of message registers.
+ *
+ * Return: the number of message registers.
+ */
+static inline int ntb_msg_count(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->msg_count)
+ return 0;
+
+ return ntb->ops->msg_count(ntb);
+}
+
+/**
+ * ntb_msg_inbits() - get a bitfield of inbound message registers status
+ * @ntb: NTB device context.
+ *
+ * The method returns the bitfield of the status and mask registers which is
+ * related to inbound message registers.
+ *
+ * Return: bitfield of inbound message registers.
+ */
+static inline u64 ntb_msg_inbits(struct ntb_dev *ntb)
{
- return ntb->ops->peer_spad_write(ntb, idx, val);
+ if (!ntb->ops->msg_inbits)
+ return 0;
+
+ return ntb->ops->msg_inbits(ntb);
+}
+
+/**
+ * ntb_msg_outbits() - get a bitfield of outbound message registers status
+ * @ntb: NTB device context.
+ *
+ * The method returns the bitfield of the status and mask registers which is
+ * related to outbound message registers.
+ *
+ * Return: bitfield of outbound message registers.
+ */
+static inline u64 ntb_msg_outbits(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->msg_outbits)
+ return 0;
+
+ return ntb->ops->msg_outbits(ntb);
+}
+
+/**
+ * ntb_msg_read_sts() - read the message registers status
+ * @ntb: NTB device context.
+ *
+ * Read the status of the message registers. Bits related to inbound and
+ * outbound message registers can be filtered by the masks retrieved from
+ * ntb_msg_inbits() and ntb_msg_outbits().
+ *
+ * Return: status bits of message registers
+ */
+static inline u64 ntb_msg_read_sts(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->msg_read_sts)
+ return 0;
+
+ return ntb->ops->msg_read_sts(ntb);
+}
+
+/**
+ * ntb_msg_clear_sts() - clear status bits of message registers
+ * @ntb: NTB device context.
+ * @sts_bits: Status bits to clear.
+ *
+ * Clear bits in the status register.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits)
+{
+ if (!ntb->ops->msg_clear_sts)
+ return -EINVAL;
+
+ return ntb->ops->msg_clear_sts(ntb, sts_bits);
+}
+
+/**
+ * ntb_msg_set_mask() - set mask of message register status bits
+ * @ntb: NTB device context.
+ * @mask_bits: Mask bits.
+ *
+ * Mask the message registers status bits from raising the message event.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+ if (!ntb->ops->msg_set_mask)
+ return -EINVAL;
+
+ return ntb->ops->msg_set_mask(ntb, mask_bits);
+}
+
+/**
+ * ntb_msg_clear_mask() - clear message registers mask
+ * @ntb: NTB device context.
+ * @mask_bits: Mask bits to clear.
+ *
+ * Clear bits in the message events mask register.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+ if (!ntb->ops->msg_clear_mask)
+ return -EINVAL;
+
+ return ntb->ops->msg_clear_mask(ntb, mask_bits);
+}
+
+/**
+ * ntb_msg_read() - read message register with specified index
+ * @ntb: NTB device context.
+ * @midx: Message register index
+ * @pidx: OUT - Port index of peer device a message retrieved from
+ * @msg: OUT - Data
+ *
+ * Read data from the specified message register. The source port index of
+ * the message is retrieved as well.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx,
+ u32 *msg)
+{
+ if (!ntb->ops->msg_read)
+ return -EINVAL;
+
+ return ntb->ops->msg_read(ntb, midx, pidx, msg);
+}
+
+/**
+ * ntb_msg_write() - write data to the specified message register
+ * @ntb: NTB device context.
+ * @midx: Message register index
+ * @pidx: Port index of peer device a message being sent to
+ * @msg: Data to send
+ *
+ * Send data to a specified peer device using the defined message register.
+ * A message event can be raised if the midx register isn't empty while
+ * calling this method and the corresponding interrupt isn't masked.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static inline int ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx,
+ u32 msg)
+{
+ if (!ntb->ops->msg_write)
+ return -EINVAL;
+
+ return ntb->ops->msg_write(ntb, midx, pidx, msg);
}
#endif
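A rough sketch (client naming and the choice of register 0 are arbitrary) of how a driver context could consume the new message-register interface from its msg_event callback and post a reply:

static void foo_msg_event(void *ctx)
{
	struct ntb_dev *ntb = ctx;	/* assuming the ntb device was set as context */
	u64 inbits = ntb_msg_inbits(ntb);
	u64 sts = ntb_msg_read_sts(ntb);
	int pidx;
	u32 data;

	if (sts & inbits) {
		/* Read inbound register 0 and note which peer wrote it */
		if (!ntb_msg_read(ntb, 0, &pidx, &data))
			dev_dbg(&ntb->dev, "msg from port %d: %#x\n",
				ntb_peer_port_number(ntb, pidx), data);
		/* Acknowledge the serviced inbound status bits */
		ntb_msg_clear_sts(ntb, sts & inbits);
	}
}

/* Post one 32-bit value to the default peer through outbound register 0 */
static int foo_msg_send(struct ntb_dev *ntb, u32 data)
{
	return ntb_msg_write(ntb, 0, NTB_DEF_PEER_IDX, data);
}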
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index cd93416d762e..497706f5adca 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -12,6 +12,9 @@
#ifndef _LINUX_NVMEM_PROVIDER_H
#define _LINUX_NVMEM_PROVIDER_H
+#include <linux/err.h>
+#include <linux/errno.h>
+
struct nvmem_device;
struct nvmem_cell_info;
typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
diff --git a/include/linux/once.h b/include/linux/once.h
index 285f12cb40e6..9c98aaa87cbc 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -53,5 +53,7 @@ void __do_once_done(bool *done, struct static_key *once_key,
#define get_random_once(buf, nbytes) \
DO_ONCE(get_random_bytes, (buf), (nbytes))
+#define get_random_once_wait(buf, nbytes) \
+ DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \
#endif /* _LINUX_ONCE_H */
diff --git a/include/linux/path.h b/include/linux/path.h
index d1372186f431..cde895cc4af4 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -7,7 +7,7 @@ struct vfsmount;
struct path {
struct vfsmount *mnt;
struct dentry *dentry;
-};
+} __randomize_layout;
extern void path_get(const struct path *);
extern void path_put(const struct path *);
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index c2a989dee876..b09136f88cf4 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -52,7 +52,7 @@ struct pid_namespace {
int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
-};
+} __randomize_layout;
extern struct pid_namespace init_pid_ns;
diff --git a/include/linux/platform_data/usb-ohci-s3c2410.h b/include/linux/platform_data/usb-ohci-s3c2410.h
index 7fa1fbefc3f2..cc7554ae6e8b 100644
--- a/include/linux/platform_data/usb-ohci-s3c2410.h
+++ b/include/linux/platform_data/usb-ohci-s3c2410.h
@@ -31,7 +31,7 @@ struct s3c2410_hcd_info {
void (*report_oc)(struct s3c2410_hcd_info *, int ports);
};
-static void inline s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports)
+static inline void s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports)
{
if (info->report_oc != NULL) {
(info->report_oc)(info, ports);
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 58ab28d81fc2..06844b54dfc1 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -21,7 +21,7 @@ struct proc_ns_operations {
int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
struct user_namespace *(*owner)(struct ns_common *ns);
struct ns_common *(*get_parent)(struct ns_common *ns);
-};
+} __randomize_layout;
extern const struct proc_ns_operations netns_operations;
extern const struct proc_ns_operations utsns_operations;
diff --git a/include/linux/random.h b/include/linux/random.h
index ed5c3838780d..eafea6a09361 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -34,6 +34,7 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
+extern int wait_for_random_bytes(void);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
@@ -57,6 +58,52 @@ static inline unsigned long get_random_long(void)
#endif
}
+/*
+ * On 64-bit architectures, protect against non-terminated C string overflows
+ * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ */
+#ifdef CONFIG_64BIT
+# ifdef __LITTLE_ENDIAN
+# define CANARY_MASK 0xffffffffffffff00UL
+# else /* big endian, 64 bits: */
+# define CANARY_MASK 0x00ffffffffffffffUL
+# endif
+#else /* 32 bits: */
+# define CANARY_MASK 0xffffffffUL
+#endif
+
+static inline unsigned long get_random_canary(void)
+{
+ unsigned long val = get_random_long();
+
+ return val & CANARY_MASK;
+}
+
+/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
+ * Returns the result of the call to wait_for_random_bytes. */
+static inline int get_random_bytes_wait(void *buf, int nbytes)
+{
+ int ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
+ get_random_bytes(buf, nbytes);
+ return 0;
+}
+
+#define declare_get_random_var_wait(var) \
+ static inline int get_random_ ## var ## _wait(var *out) { \
+ int ret = wait_for_random_bytes(); \
+ if (unlikely(ret)) \
+ return ret; \
+ *out = get_random_ ## var(); \
+ return 0; \
+ }
+declare_get_random_var_wait(u32)
+declare_get_random_var_wait(u64)
+declare_get_random_var_wait(int)
+declare_get_random_var_wait(long)
+#undef declare_get_random_var
+
unsigned long randomize_page(unsigned long start, unsigned long range);
u32 prandom_u32(void);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index b693adac853b..0a0f0d14a5fb 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/nvmem-provider.h>
#include <uapi/linux/rtc.h>
extern int rtc_month_days(unsigned int month, unsigned int year);
@@ -32,17 +33,11 @@ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs);
}
-/**
- * Deprecated. Use rtc_time64_to_tm().
- */
static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
{
rtc_time64_to_tm(time, tm);
}
-/**
- * Deprecated. Use rtc_tm_to_time64().
- */
static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
{
*time = rtc_tm_to_time64(tm);
@@ -116,7 +111,6 @@ struct rtc_device {
struct module *owner;
int id;
- char name[RTC_DEVICE_NAME_SIZE];
const struct rtc_class_ops *ops;
struct mutex ops_lock;
@@ -143,6 +137,14 @@ struct rtc_device {
/* Some hardware can't support UIE mode */
int uie_unsupported;
+ bool registered;
+
+ struct nvmem_config *nvmem_config;
+ struct nvmem_device *nvmem;
+ /* Old ABI support */
+ bool nvram_old_abi;
+ struct bin_attribute *nvram;
+
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
struct work_struct uie_task;
struct timer_list uie_timer;
@@ -164,6 +166,8 @@ extern struct rtc_device *devm_rtc_device_register(struct device *dev,
const char *name,
const struct rtc_class_ops *ops,
struct module *owner);
+struct rtc_device *devm_rtc_allocate_device(struct device *dev);
+int __rtc_register_device(struct module *owner, struct rtc_device *rtc);
extern void rtc_device_unregister(struct rtc_device *rtc);
extern void devm_rtc_device_unregister(struct device *dev,
struct rtc_device *rtc);
@@ -219,6 +223,9 @@ static inline bool is_leap_year(unsigned int year)
return (!(year % 4) && (year % 100)) || !(year % 400);
}
+#define rtc_register_device(device) \
+ __rtc_register_device(THIS_MODULE, device)
+
#ifdef CONFIG_RTC_HCTOSYS_DEVICE
extern int rtc_hctosys_ret;
#else
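A rough probe sketch (driver names hypothetical) showing the intended two-step flow behind devm_rtc_allocate_device() and the rtc_register_device() wrapper added above: allocate first, fill in ops and the optional nvram/nvmem fields, then register.

static int my_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &my_rtc_ops;		/* hypothetical ops table */
	rtc->nvram_old_abi = true;	/* keep the legacy sysfs nvram file */

	return rtc_register_device(rtc);
}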
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 20814b7d7d70..8337e2db0bb2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -426,7 +426,7 @@ struct sched_rt_entity {
/* rq "owned" by this entity/group: */
struct rt_rq *my_q;
#endif
-};
+} __randomize_layout;
struct sched_dl_entity {
struct rb_node rb_node;
@@ -526,6 +526,13 @@ struct task_struct {
#endif
/* -1 unrunnable, 0 runnable, >0 stopped: */
volatile long state;
+
+ /*
+ * This begins the randomizable portion of task_struct. Only
+ * scheduling-critical items should be added above here.
+ */
+ randomized_struct_fields_start
+
void *stack;
atomic_t usage;
/* Per task flags (PF_*), defined further below: */
@@ -974,6 +981,7 @@ struct task_struct {
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
+ unsigned int fail_nth;
#endif
/*
* When (nr_dirtied >= nr_dirtied_pause), it's time to call
@@ -1078,6 +1086,13 @@ struct task_struct {
/* Used by LSM modules for access restriction: */
void *security;
#endif
+
+ /*
+ * New fields for task_struct should be added above here, so that
+ * they are included in the randomized portion of task_struct.
+ */
+ randomized_struct_fields_end
+
/* CPU-specific state of this task: */
struct thread_struct thread;
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index c06d63b3a583..2a0dd40b15db 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -222,7 +222,7 @@ struct signal_struct {
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
* (notably. ptrace) */
-};
+} __randomize_layout;
/*
* Bits in flags field of signal_struct.
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 9edec926e9d9..de2deb8676bd 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -8,11 +8,29 @@
struct task_struct;
+/* One semaphore structure for each semaphore in the system. */
+struct sem {
+ int semval; /* current value */
+ /*
+ * PID of the process that last modified the semaphore. For
+ * Linux, specifically these are:
+ * - semop
+ * - semctl, via SETVAL and SETALL.
+ * - at task exit when performing undo adjustments (see exit_sem).
+ */
+ int sempid;
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
+ struct list_head pending_alter; /* pending single-sop operations */
+ /* that alter the semaphore */
+ struct list_head pending_const; /* pending single-sop operations */
+ /* that do not alter the semaphore*/
+ time_t sem_otime; /* candidate for sem_otime */
+} ____cacheline_aligned_in_smp;
+
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
- time_t sem_ctime; /* last change time */
- struct sem *sem_base; /* ptr to first semaphore in array */
+ time_t sem_ctime; /* create/last semctl() time */
struct list_head pending_alter; /* pending operations */
/* that alter the array */
struct list_head pending_const; /* pending complex operations */
@@ -21,7 +39,9 @@ struct sem_array {
int sem_nsems; /* no. of semaphores in array */
int complex_count; /* pending complex operations */
unsigned int use_global_lock;/* >0: global lock required */
-};
+
+ struct sem sems[];
+} __randomize_layout;
#ifdef CONFIG_SYSVIPC
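With the semaphores now a flexible array member at the tail of struct sem_array, header and per-semaphore data come from a single allocation and are addressed as sma->sems[i] rather than through the removed sem_base pointer. A sizing sketch (helper name hypothetical; ipc/sem.c performs the equivalent computation):

static inline size_t my_sem_array_bytes(int nsems)
{
	/* One block: the array header plus nsems trailing struct sem. */
	return sizeof(struct sem_array) + nsems * sizeof(struct sem);
}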
diff --git a/include/linux/shm.h b/include/linux/shm.h
index 04e881829625..0fb7061ec54c 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -22,7 +22,7 @@ struct shmid_kernel /* private to the kernel */
/* The task created the shm object. NULL if the task is dead. */
struct task_struct *shm_creator;
struct list_head shm_clist; /* list by creator */
-};
+} __randomize_layout;
/* shm_mode upper byte flags */
#define SHM_DEST 01000 /* segment will be destroyed on last detach */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 04a7f7993e67..41473df6dfb0 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -471,7 +471,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
*
* %__GFP_NOWARN - If allocation fails, don't issue any warnings.
*
- * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
+ * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
+ * eventually.
*
* There are other flags available as well, but these are not intended
* for general use, and so are not documented here. For a full list of
diff --git a/include/linux/string.h b/include/linux/string.h
index 7439d83eaa33..a467e617eeb0 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -137,6 +137,7 @@ extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
+extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
@@ -193,4 +194,205 @@ static inline const char *kbasename(const char *path)
return tail ? tail + 1 : path;
}
+#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
+#define __RENAME(x) __asm__(#x)
+
+void fortify_panic(const char *name) __noreturn __cold;
+void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
+void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
+void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
+
+#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_strncpy(p, q, size);
+}
+
+__FORTIFY_INLINE char *strcat(char *p, const char *q)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (p_size == (size_t)-1)
+ return __builtin_strcat(p, q);
+ if (strlcat(p, q, p_size) >= p_size)
+ fortify_panic(__func__);
+ return p;
+}
+
+__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
+{
+ __kernel_size_t ret;
+ size_t p_size = __builtin_object_size(p, 0);
+ if (p_size == (size_t)-1)
+ return __builtin_strlen(p);
+ ret = strnlen(p, p_size);
+ if (p_size <= ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+/* defined after fortified strlen to reuse it */
+extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
+__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
+{
+ size_t ret;
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __real_strlcpy(p, q, size);
+ ret = strlen(q);
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ if (__builtin_constant_p(len) && len >= p_size)
+ __write_overflow();
+ if (len >= p_size)
+ fortify_panic(__func__);
+ __builtin_memcpy(p, q, len);
+ p[len] = '\0';
+ }
+ return ret;
+}
+
+/* defined after fortified strlen and strnlen to reuse them */
+__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
+{
+ size_t p_len, copy_len;
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strncat(p, q, count);
+ p_len = strlen(p);
+ copy_len = strnlen(q, count);
+ if (p_size < p_len + copy_len + 1)
+ fortify_panic(__func__);
+ __builtin_memcpy(p + p_len, q, copy_len);
+ p[p_len + copy_len] = '\0';
+ return p;
+}
+
+__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_memset(p, c, size);
+}
+
+__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __write_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memcpy(p, q, size);
+}
+
+__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __write_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memmove(p, q, size);
+}
+
+extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
+__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memscan(p, c, size);
+}
+
+__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __read_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memcmp(p, q, size);
+}
+
+__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_memchr(p, c, size);
+}
+
+void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
+__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memchr_inv(p, c, size);
+}
+
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
+__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_kmemdup(p, size, gfp);
+}
+
+/* defined after fortified strlen and memcpy to reuse them */
+__FORTIFY_INLINE char *strcpy(char *p, const char *q)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strcpy(p, q);
+ memcpy(p, q, strlen(q) + 1);
+ return p;
+}
+
+#endif
+
#endif /* _LINUX_STRING_H_ */
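The fortified wrappers above lean on __builtin_object_size(): overflows with compile-time-constant sizes become build errors via __write_overflow()/__read_overflow(), and everything else degrades to a runtime fortify_panic() check. An illustration (not kernel code, and only active when CONFIG_FORTIFY_SOURCE and optimization are enabled):

void fortify_demo(size_t n, const char *src16)
{
	char dst[8];

	/*
	 * memcpy(dst, src16, 16);
	 * ^ rejected at build time: 16 is a constant larger than
	 *   __builtin_object_size(dst, 0) == 8, so the fortified
	 *   memcpy() calls __write_overflow().
	 */

	/* Non-constant sizes fall back to a runtime check: if n > 8,
	 * fortify_panic() fires instead of silently overflowing dst. */
	memcpy(dst, src16, n);
}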
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 6095ecba0dde..55ef67bea06b 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -39,7 +39,7 @@ struct rpc_clnt {
struct list_head cl_tasks; /* List of tasks */
spinlock_t cl_lock; /* spinlock */
struct rpc_xprt __rcu * cl_xprt; /* transport */
- struct rpc_procinfo * cl_procinfo; /* procedure info */
+ const struct rpc_procinfo *cl_procinfo; /* procedure info */
u32 cl_prog, /* RPC program number */
cl_vers, /* RPC version number */
cl_maxproc; /* max procedure number */
@@ -87,7 +87,8 @@ struct rpc_program {
struct rpc_version {
u32 number; /* version number */
unsigned int nrprocs; /* number of procs */
- struct rpc_procinfo * procs; /* procedure array */
+ const struct rpc_procinfo *procs; /* procedure array */
+ unsigned int *counts; /* call counts */
};
/*
@@ -99,7 +100,6 @@ struct rpc_procinfo {
kxdrdproc_t p_decode; /* XDR decode function */
unsigned int p_arglen; /* argument hdr length (u32) */
unsigned int p_replen; /* reply hdr length (u32) */
- unsigned int p_count; /* call count */
unsigned int p_timer; /* Which RTT timer to use */
u32 p_statidx; /* Which procedure to account */
const char * p_name; /* name of procedure */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 9d7529ffc4ce..50a99a117da7 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -22,7 +22,7 @@
*/
struct rpc_procinfo;
struct rpc_message {
- struct rpc_procinfo * rpc_proc; /* Procedure information */
+ const struct rpc_procinfo *rpc_proc; /* Procedure information */
void * rpc_argp; /* Arguments */
void * rpc_resp; /* Result */
struct rpc_cred * rpc_cred; /* Credentials */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 11cef5a7bc87..a3f8af9bd543 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -237,7 +237,7 @@ struct svc_rqst {
struct svc_serv * rq_server; /* RPC service definition */
struct svc_pool * rq_pool; /* thread pool */
- struct svc_procedure * rq_procinfo; /* procedure info */
+ const struct svc_procedure *rq_procinfo;/* procedure info */
struct auth_ops * rq_authop; /* authentication flavour */
struct svc_cred rq_cred; /* auth info */
void * rq_xprt_ctxt; /* transport specific context ptr */
@@ -246,7 +246,7 @@ struct svc_rqst {
size_t rq_xprt_hlen; /* xprt header len */
struct xdr_buf rq_arg;
struct xdr_buf rq_res;
- struct page * rq_pages[RPCSVC_MAXPAGES];
+ struct page *rq_pages[RPCSVC_MAXPAGES + 1];
struct page * *rq_respages; /* points into rq_pages */
struct page * *rq_next_page; /* next reply page to use */
struct page * *rq_page_end; /* one past the last page */
@@ -384,7 +384,7 @@ struct svc_program {
unsigned int pg_lovers; /* lowest version */
unsigned int pg_hivers; /* highest version */
unsigned int pg_nvers; /* number of versions */
- struct svc_version ** pg_vers; /* version array */
+ const struct svc_version **pg_vers; /* version array */
char * pg_name; /* service name */
char * pg_class; /* class name: services sharing authentication */
struct svc_stat * pg_stats; /* rpc statistics */
@@ -397,7 +397,8 @@ struct svc_program {
struct svc_version {
u32 vs_vers; /* version number */
u32 vs_nproc; /* number of procedures */
- struct svc_procedure * vs_proc; /* per-procedure info */
+ const struct svc_procedure *vs_proc; /* per-procedure info */
+ unsigned int *vs_count; /* call counts */
u32 vs_xdrsize; /* xdrsize needed for this version */
/* Don't register with rpcbind */
@@ -419,15 +420,17 @@ struct svc_version {
/*
* RPC procedure info
*/
-typedef __be32 (*svc_procfunc)(struct svc_rqst *, void *argp, void *resp);
struct svc_procedure {
- svc_procfunc pc_func; /* process the request */
- kxdrproc_t pc_decode; /* XDR decode args */
- kxdrproc_t pc_encode; /* XDR encode result */
- kxdrproc_t pc_release; /* XDR free result */
+ /* process the request: */
+ __be32 (*pc_func)(struct svc_rqst *);
+ /* XDR decode args: */
+ int (*pc_decode)(struct svc_rqst *, __be32 *data);
+ /* XDR encode result: */
+ int (*pc_encode)(struct svc_rqst *, __be32 *data);
+ /* XDR free result: */
+ void (*pc_release)(struct svc_rqst *);
unsigned int pc_argsize; /* argument struct size */
unsigned int pc_ressize; /* result struct size */
- unsigned int pc_count; /* call count */
unsigned int pc_cachetype; /* cache info (NFS) */
unsigned int pc_xdrressize; /* maximum size of XDR reply */
};
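Under the new layout a procedure handler takes only the request, finding its decoded arguments in rqstp->rq_argp and placing results in rqstp->rq_resp, while the owning svc_version points at a separate per-procedure call-count array via vs_count. A hypothetical table entry might look like:

static __be32 my_proc_null(struct svc_rqst *rqstp)
{
	return rpc_success;
}

static const struct svc_procedure my_procedures[] = {
	[0] = {
		.pc_func	= my_proc_null,
		.pc_argsize	= 0,
		.pc_ressize	= 0,
		.pc_xdrressize	= 1,
	},
};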
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index f3787d800ba4..995c6fe9ee90 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -77,46 +77,25 @@ extern atomic_t rdma_stat_sq_prod;
*/
struct svc_rdma_op_ctxt {
struct list_head list;
- struct svc_rdma_op_ctxt *read_hdr;
- struct svc_rdma_fastreg_mr *frmr;
- int hdr_count;
struct xdr_buf arg;
struct ib_cqe cqe;
- struct ib_cqe reg_cqe;
- struct ib_cqe inv_cqe;
u32 byte_len;
- u32 position;
struct svcxprt_rdma *xprt;
- unsigned long flags;
enum dma_data_direction direction;
int count;
unsigned int mapped_sges;
+ int hdr_count;
struct ib_send_wr send_wr;
struct ib_sge sge[1 + RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE];
struct page *pages[RPCSVC_MAXPAGES];
};
-struct svc_rdma_fastreg_mr {
- struct ib_mr *mr;
- struct scatterlist *sg;
- int sg_nents;
- unsigned long access_flags;
- enum dma_data_direction direction;
- struct list_head frmr_list;
-};
-
-#define RDMACTXT_F_LAST_CTXT 2
-
-#define SVCRDMA_DEVCAP_FAST_REG 1 /* fast mr registration */
-#define SVCRDMA_DEVCAP_READ_W_INV 2 /* read w/ invalidate */
-
struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
struct rdma_cm_id *sc_cm_id; /* RDMA connection id */
struct list_head sc_accept_q; /* Conn. waiting accept */
int sc_ord; /* RDMA read limit */
int sc_max_sge;
- int sc_max_sge_rd; /* max sge for read target */
bool sc_snd_w_inv; /* OK to use Send With Invalidate */
atomic_t sc_sq_avail; /* SQEs ready to be consumed */
@@ -141,14 +120,6 @@ struct svcxprt_rdma {
struct ib_qp *sc_qp;
struct ib_cq *sc_rq_cq;
struct ib_cq *sc_sq_cq;
- int (*sc_reader)(struct svcxprt_rdma *,
- struct svc_rqst *,
- struct svc_rdma_op_ctxt *,
- int *, u32 *, u32, u32, u64, bool);
- u32 sc_dev_caps; /* distilled device caps */
- unsigned int sc_frmr_pg_list_len;
- struct list_head sc_frmr_q;
- spinlock_t sc_frmr_q_lock;
spinlock_t sc_lock; /* transport lock */
@@ -185,20 +156,14 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
__be32 *rdma_resp,
struct xdr_buf *rcvbuf);
-/* svc_rdma_marshal.c */
-extern int svc_rdma_xdr_decode_req(struct xdr_buf *);
-
/* svc_rdma_recvfrom.c */
extern int svc_rdma_recvfrom(struct svc_rqst *);
-extern int rdma_read_chunk_lcl(struct svcxprt_rdma *, struct svc_rqst *,
- struct svc_rdma_op_ctxt *, int *, u32 *,
- u32, u32, u64, bool);
-extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
- struct svc_rdma_op_ctxt *, int *, u32 *,
- u32, u32, u64, bool);
/* svc_rdma_rw.c */
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
+extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma,
+ struct svc_rqst *rqstp,
+ struct svc_rdma_op_ctxt *head, __be32 *p);
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
__be32 *wr_ch, struct xdr_buf *xdr);
extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
@@ -226,9 +191,6 @@ extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
-extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
-extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
- struct svc_rdma_fastreg_mr *);
extern void svc_sq_reap(struct svcxprt_rdma *);
extern void svc_rq_reap(struct svcxprt_rdma *);
extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 054c8cde18f3..261b48a2701d 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -17,6 +17,8 @@
#include <asm/unaligned.h>
#include <linux/scatterlist.h>
+struct rpc_rqst;
+
/*
* Buffer adjustment
*/
@@ -33,13 +35,6 @@ struct xdr_netobj {
};
/*
- * This is the legacy generic XDR function. rqstp is either a rpc_rqst
- * (client side) or svc_rqst pointer (server side).
- * Encode functions always assume there's enough room in the buffer.
- */
-typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);
-
-/*
* Basic structure for transmission/reception of a client XDR message.
* Features a header (for a linear buffer containing RPC headers
* and the data payload for short messages), and then an array of
@@ -222,8 +217,10 @@ struct xdr_stream {
/*
* These are the xdr_stream style generic XDR encode and decode functions.
*/
-typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
-typedef int (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+typedef void (*kxdreproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ const void *obj);
+typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ void *obj);
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 80d07816def0..1d4dba490fb6 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -47,6 +47,9 @@ extern int proc_douintvec(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+extern int proc_douintvec_minmax(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
@@ -117,7 +120,7 @@ struct ctl_table
struct ctl_table_poll *poll;
void *extra1;
void *extra2;
-};
+} __randomize_layout;
struct ctl_node {
struct rb_node node;
@@ -143,7 +146,7 @@ struct ctl_table_header
struct ctl_table_set *set;
struct ctl_dir *parent;
struct ctl_node *node;
- struct list_head inodes; /* head for proc_inode->sysctl_inodes */
+ struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */
};
struct ctl_dir {
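A hypothetical table entry using the new proc_douintvec_minmax() handler, which clamps an unsigned int between the bounds pointed to by extra1/extra2:

static unsigned int my_knob;
static unsigned int my_knob_min;		/* 0 */
static unsigned int my_knob_max = 100;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_knob",
		.data		= &my_knob,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= &my_knob_min,
		.extra2		= &my_knob_max,
	},
	{ }
};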
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 69464c0d8068..79c30daf46a9 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -332,7 +332,7 @@ struct tty_struct {
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
-};
+} __randomize_layout;
/* Each of a tty's open files has private_data pointing to tty_file_private */
struct tty_file_private {
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index b742b5e47cc2..00b2213f6a35 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -291,7 +291,7 @@ struct tty_operations {
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
const struct file_operations *proc_fops;
-};
+} __randomize_layout;
struct tty_driver {
int magic; /* magic number for this structure */
@@ -325,7 +325,7 @@ struct tty_driver {
const struct tty_operations *ops;
struct list_head tty_drivers;
-};
+} __randomize_layout;
extern struct list_head tty_drivers;
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 021f7a88f52c..1a59699cf82a 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -83,6 +83,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
+#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 32354b4b4b2b..b3575ce29148 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -66,7 +66,7 @@ struct user_namespace {
#endif
struct ucounts *ucounts;
int ucount_max[UCOUNT_COUNTS];
-};
+} __randomize_layout;
struct ucounts {
struct hlist_node node;
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 60f0bb83b313..da826ed059cf 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -26,7 +26,7 @@ struct uts_namespace {
struct user_namespace *user_ns;
struct ucounts *ucounts;
struct ns_common ns;
-};
+} __randomize_layout;
extern struct uts_namespace init_uts_ns;
#ifdef CONFIG_UTS_NS
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index f57076b958b7..586809abb273 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -97,6 +97,8 @@ extern void vfio_unregister_iommu_driver(
*/
extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
extern void vfio_group_put_external_user(struct vfio_group *group);
+extern bool vfio_external_group_match_file(struct vfio_group *group,
+ struct file *filep);
extern int vfio_external_user_iommu_id(struct vfio_group *group);
extern long vfio_external_check_extension(struct vfio_group *group,
unsigned long arg);
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index b582339ccef5..7af9d769b97d 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -157,6 +157,18 @@ struct p9_client {
enum p9_trans_status status;
void *trans;
+ union {
+ struct {
+ int rfd;
+ int wfd;
+ } fd;
+ struct {
+ u16 port;
+ bool privport;
+
+ } tcp;
+ } trans_opts;
+
struct p9_idpool *fidpool;
struct list_head fidlist;
@@ -213,6 +225,7 @@ struct p9_dirent {
struct iov_iter;
+int p9_show_client_options(struct seq_file *m, struct p9_client *clnt);
int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb);
int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid,
const char *name);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 5122b5e40f78..1625fb842ac4 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -62,6 +62,7 @@ struct p9_trans_module {
int (*cancelled)(struct p9_client *, struct p9_req_t *req);
int (*zc_request)(struct p9_client *, struct p9_req_t *,
struct iov_iter *, struct iov_iter *, int , int, int);
+ int (*show_options)(struct seq_file *, struct p9_client *);
};
void v9fs_register_trans(struct p9_trans_module *m);
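A sketch of the new show_options callback for a transport that keeps its settings in the trans_opts union added to struct p9_client above; the function name is hypothetical and 564 stands in for the transport's default-port macro.

static int my_trans_show_options(struct seq_file *m, struct p9_client *clnt)
{
	if (clnt->trans_opts.tcp.privport)
		seq_puts(m, ",privport");
	if (clnt->trans_opts.tcp.port != 564)
		seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
	return 0;
}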
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 3b3194b2fc65..afb37f835449 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -37,7 +37,7 @@ struct unix_skb_parms {
u32 secid; /* Security ID */
#endif
u32 consumed;
-};
+} __randomize_layout;
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index afc39e3a3f7c..9816df225af3 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -156,7 +156,7 @@ struct neighbour {
struct rcu_head rcu;
struct net_device *dev;
u8 primary_key[0];
-};
+} __randomize_layout;
struct neigh_ops {
int family;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 31a2b51bef2c..1c401bd4c2e0 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -148,7 +148,7 @@ struct net {
#endif
struct sock *diag_nlsk;
atomic_t fnhe_genid;
-};
+} __randomize_layout;
#include <linux/seq_file_net.h>
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 01709172b3d3..ef8e6c3a80a6 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -98,8 +98,8 @@
* nla_put_u8(skb, type, value) add u8 attribute to skb
* nla_put_u16(skb, type, value) add u16 attribute to skb
* nla_put_u32(skb, type, value) add u32 attribute to skb
- * nla_put_u64_64bits(skb, type,
- * value, padattr) add u64 attribute to skb
+ * nla_put_u64_64bit(skb, type,
+ * value, padattr) add u64 attribute to skb
* nla_put_s8(skb, type, value) add s8 attribute to skb
* nla_put_s16(skb, type, value) add s16 attribute to skb
* nla_put_s32(skb, type, value) add s32 attribute to skb
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a9519a06a23b..980807d7506f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -469,6 +469,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
+ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <\
+ (void *)chunk + end) &&\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(struct sctp_paramhdr);\
pos.v += SCTP_PAD4(ntohs(pos.p->length)))
@@ -479,6 +481,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
#define _sctp_walk_errors(err, chunk_hdr, end)\
for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(struct sctp_chunkhdr));\
+ ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <\
+ (void *)chunk_hdr + end) &&\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
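The clause added to both walkers guarantees that the length field itself lies inside the chunk before ntohs() dereferences it; only then is it trusted to bound the rest of the entry. Written out as a helper purely for readability (the macros open-code this so they remain usable as for-loop conditions):

static inline bool my_param_in_bounds(const void *pos, const void *chunk_end)
{
	const struct sctp_paramhdr *p = pos;

	/* Is the length field fully inside the chunk? */
	if ((const void *)(&p->length + 1) >= chunk_end)
		return false;

	/* Only then may the advertised length bound the parameter itself. */
	return ntohs(p->length) >= sizeof(struct sctp_paramhdr) &&
	       pos + ntohs(p->length) <= chunk_end;
}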
diff --git a/include/net/sock.h b/include/net/sock.h
index f69c8c2782df..7c0632c7e870 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1128,7 +1128,7 @@ struct proto {
atomic_t socks;
#endif
int (*diag_destroy)(struct sock *sk, int err);
-};
+} __randomize_layout;
int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 356953d3dbd1..b5732432bb29 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1056,7 +1056,7 @@ enum ib_qp_create_flags {
IB_QP_CREATE_MANAGED_RECV = 1 << 4,
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
- IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
+ /* FREE = 1 << 7, */
IB_QP_CREATE_SCATTER_FCS = 1 << 8,
IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
/* reserve bits 26-31 for low level drivers' internal use */
@@ -2948,6 +2948,22 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr);
/**
+ * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
+ * @qp: The QP to modify.
+ * @attr: On input, specifies the QP attributes to modify. On output,
+ * the current values of selected QP attributes are returned.
+ * @attr_mask: A bit-mask used to specify which attributes of the QP
+ * are being modified.
+ * @udata: pointer to the user's input/output buffer information
+ *
+ * Returns 0 on success, or an appropriate error code on failure.
+ */
+int ib_modify_qp_with_udata(struct ib_qp *qp,
+ struct ib_qp_attr *attr,
+ int attr_mask,
+ struct ib_udata *udata);
+
+/**
* ib_modify_qp - Modifies the attributes for the specified QP and then
* transitions the QP to the given state.
* @qp: The QP to modify.
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 4878aaf7bdff..55af69271053 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -229,8 +229,7 @@ struct rvt_driver_provided {
* ERR_PTR(err). The driver is free to return NULL or a valid
* pointer.
*/
- void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp);
+ void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
/*
* Free the driver's private qp structure.
@@ -319,7 +318,7 @@ struct rvt_driver_provided {
/* Let the driver pick the next queue pair number*/
int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num, gfp_t gfp);
+ enum ib_qp_type type, u8 port_num);
/* Determine if its safe or allowed to modify the qp */
int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 8260700d662b..8c285d9a06d8 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -158,6 +158,7 @@
#define READ_32 0x09
#define VERIFY_32 0x0a
#define WRITE_32 0x0b
+#define WRITE_VERIFY_32 0x0c
#define WRITE_SAME_32 0x0d
#define ATA_32 0x1ff0
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 5f17fb770477..0ca1fb08805b 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -66,6 +66,14 @@ struct sock;
#define TA_DEFAULT_FABRIC_PROT_TYPE 0
/* TPG status needs to be enabled to return sendtargets discovery endpoint info */
#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
+/*
+ * Used to control the sending of keys with the optional-to-respond state
+ * bit, as a workaround for non-RFC-compliant initiators that do not
+ * propose, nor respond to, specific keys required for login to complete.
+ *
+ * See iscsi_check_proposer_for_optional_reply() for more details.
+ */
+#define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1
#define ISCSI_IOV_DATA_BUFFER 5
@@ -560,7 +568,6 @@ struct iscsi_conn {
#define LOGIN_FLAGS_INITIAL_PDU 8
unsigned long login_flags;
struct delayed_work login_work;
- struct delayed_work login_cleanup_work;
struct iscsi_login *login;
struct timer_list nopin_timer;
struct timer_list nopin_response_timer;
@@ -769,6 +776,7 @@ struct iscsi_tpg_attrib {
u8 t10_pi;
u32 fabric_prot_type;
u32 tpg_enabled_sendtargets;
+ u32 login_keys_workaround;
struct iscsi_portal_group *tpg;
};
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index e475531565fd..e150e391878b 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -2,6 +2,7 @@
#define TARGET_CORE_BACKEND_H
#include <linux/types.h>
+#include <asm/unaligned.h>
#include <target/target_core_base.h>
#define TRANSPORT_FLAG_PASSTHROUGH 0x1
@@ -29,16 +30,13 @@ struct target_backend_ops {
struct se_device *(*alloc_device)(struct se_hba *, const char *);
int (*configure_device)(struct se_device *);
+ void (*destroy_device)(struct se_device *);
void (*free_device)(struct se_device *device);
ssize_t (*set_configfs_dev_params)(struct se_device *,
const char *, ssize_t);
ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
- void (*transport_complete)(struct se_cmd *cmd,
- struct scatterlist *,
- unsigned char *);
-
sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
u32 (*get_device_type)(struct se_device *);
sector_t (*get_blocks)(struct se_device *);
@@ -71,6 +69,8 @@ void target_backend_unregister(const struct target_backend_ops *);
void target_complete_cmd(struct se_cmd *, u8);
void target_complete_cmd_with_length(struct se_cmd *, u8, int);
+void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);
+
sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
@@ -104,9 +104,18 @@ bool target_lun_is_rdonly(struct se_cmd *);
sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
+struct se_device *target_find_device(int id, bool do_depend);
+
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct request_queue *q);
+
+/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
+static inline uint32_t get_unaligned_be24(const uint8_t *const p)
+{
+ return get_unaligned_be32(p - 1) & 0xffffffU;
+}
+
#endif /* TARGET_CORE_BACKEND_H */
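A usage sketch satisfying the caveat above: in a READ(6)/WRITE(6) CDB the 21-bit LBA occupies bytes 1..3, so p - 1 is the always-readable opcode byte (helper name hypothetical).

static inline u32 my_read6_lba(const unsigned char *cdb)
{
	/* Bytes 1-3, with the reserved top bits of byte 1 masked off. */
	return get_unaligned_be24(&cdb[1]) & 0x1fffff;
}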
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 0c1dce2ac6f0..516764febeb7 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -188,7 +188,8 @@ enum target_sc_flags_table {
TARGET_SCF_BIDI_OP = 0x01,
TARGET_SCF_ACK_KREF = 0x02,
TARGET_SCF_UNKNOWN_SIZE = 0x04,
- TARGET_SCF_USE_CPUID = 0x08,
+ TARGET_SCF_USE_CPUID = 0x08,
+ TARGET_SCF_LOOKUP_LUN_FROM_TAG = 0x10,
};
/* fabric independent task management function values */
@@ -218,7 +219,6 @@ enum tcm_tmrsp_table {
*/
typedef enum {
SCSI_INST_INDEX,
- SCSI_DEVICE_INDEX,
SCSI_AUTH_INTR_INDEX,
SCSI_INDEX_TYPE_MAX
} scsi_index_t;
@@ -701,8 +701,6 @@ struct scsi_port_stats {
struct se_lun {
u64 unpacked_lun;
-#define SE_LUN_LINK_MAGIC 0xffff7771
- u32 lun_link_magic;
bool lun_shutdown;
bool lun_access_ro;
u32 lun_index;
@@ -746,8 +744,6 @@ struct se_dev_stat_grps {
};
struct se_device {
-#define SE_DEV_LINK_MAGIC 0xfeeddeef
- u32 dev_link_magic;
/* RELATIVE TARGET PORT IDENTIFER Counter */
u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
@@ -800,7 +796,6 @@ struct se_device {
struct list_head delayed_cmd_list;
struct list_head state_list;
struct list_head qf_cmd_list;
- struct list_head g_dev_node;
/* Pointer to associated SE HBA */
struct se_hba *se_hba;
/* T10 Inquiry and VPD WWN Information */
@@ -819,8 +814,6 @@ struct se_device {
unsigned char udev_path[SE_UDEV_PATH_LEN];
/* Pointer to template of function pointers for transport */
const struct target_backend_ops *transport;
- /* Linked list for struct se_hba struct se_device list */
- struct list_head dev_list;
struct se_lun xcopy_lun;
/* Protection Information */
int prot_length;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index d7dd1427fe0d..33d2e3e5773c 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -160,6 +160,7 @@ int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *);
+void target_show_cmd(const char *pfx, struct se_cmd *cmd);
int core_alua_check_nonop_delay(struct se_cmd *);
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 10e3663a75a6..8e50d01c645f 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -34,7 +34,7 @@
{(unsigned long)__GFP_FS, "__GFP_FS"}, \
{(unsigned long)__GFP_COLD, "__GFP_COLD"}, \
{(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
- {(unsigned long)__GFP_REPEAT, "__GFP_REPEAT"}, \
+ {(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
{(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
{(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
{(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
diff --git a/include/uapi/linux/kcmp.h b/include/uapi/linux/kcmp.h
index 84df14b37360..481e103da78e 100644
--- a/include/uapi/linux/kcmp.h
+++ b/include/uapi/linux/kcmp.h
@@ -1,6 +1,8 @@
#ifndef _UAPI_LINUX_KCMP_H
#define _UAPI_LINUX_KCMP_H
+#include <linux/types.h>
+
/* Comparison type */
enum kcmp_type {
KCMP_FILE,
@@ -10,8 +12,16 @@ enum kcmp_type {
KCMP_SIGHAND,
KCMP_IO,
KCMP_SYSVSEM,
+ KCMP_EPOLL_TFD,
KCMP_TYPES,
};
+/* Slot for KCMP_EPOLL_TFD */
+struct kcmp_epoll_slot {
+ __u32 efd; /* epoll file descriptor */
+ __u32 tfd; /* target file number */
+ __u32 toff; /* target offset within same numbered sequence */
+};
+
#endif /* _UAPI_LINUX_KCMP_H */
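From userspace, KCMP_EPOLL_TFD compares a plain file descriptor in one task against a target file sitting inside another task's epoll instance, described by the new slot structure. A rough sketch (glibc has no wrapper, so the raw syscall is used; names are illustrative):

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kcmp.h>

static int my_kcmp_epoll_tfd(pid_t pid1, pid_t pid2, int fd1,
			     struct kcmp_epoll_slot *slot)
{
	/* fd1 lives in pid1; *slot names {epoll fd, target fd, dup offset}
	 * inside pid2.  Returns 0 if both refer to the same struct file. */
	return syscall(SYS_kcmp, pid1, pid2, KCMP_EPOLL_TFD,
		       fd1, (unsigned long)slot);
}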
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index c0b6dfec5f87..6cd63c18708a 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -927,6 +927,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_CMMA_MIGRATION 145
#define KVM_CAP_PPC_FWNMI 146
#define KVM_CAP_PPC_SMT_POSSIBLE 147
+#define KVM_CAP_HYPERV_SYNIC2 148
+#define KVM_CAP_HYPERV_VP_INDEX 149
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1351,7 +1353,7 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_X86_SMM */
#define KVM_SMI _IO(KVMIO, 0xb7)
/* Available with KVM_CAP_S390_CMMA_MIGRATION */
-#define KVM_S390_GET_CMMA_BITS _IOW(KVMIO, 0xb8, struct kvm_s390_cmma_log)
+#define KVM_S390_GET_CMMA_BITS _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
#define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
diff --git a/include/uapi/linux/sem.h b/include/uapi/linux/sem.h
index dd73b908b2f3..67eb90361692 100644
--- a/include/uapi/linux/sem.h
+++ b/include/uapi/linux/sem.h
@@ -23,7 +23,7 @@
struct semid_ds {
struct ipc_perm sem_perm; /* permissions .. see ipc.h */
__kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ __kernel_time_t sem_ctime; /* create/last semctl() time */
struct sem *sem_base; /* ptr to first semaphore in array */
struct sem_queue *sem_pending; /* pending operations to be processed */
struct sem_queue **sem_pending_last; /* last pending operation */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index af17b4154ef6..24a1c4ec2248 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -130,6 +130,11 @@ enum tcmu_genl_cmd {
TCMU_CMD_UNSPEC,
TCMU_CMD_ADDED_DEVICE,
TCMU_CMD_REMOVED_DEVICE,
+ TCMU_CMD_RECONFIG_DEVICE,
+ TCMU_CMD_ADDED_DEVICE_DONE,
+ TCMU_CMD_REMOVED_DEVICE_DONE,
+ TCMU_CMD_RECONFIG_DEVICE_DONE,
+ TCMU_CMD_SET_FEATURES,
__TCMU_CMD_MAX,
};
#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)
@@ -138,6 +143,13 @@ enum tcmu_genl_attr {
TCMU_ATTR_UNSPEC,
TCMU_ATTR_DEVICE,
TCMU_ATTR_MINOR,
+ TCMU_ATTR_PAD,
+ TCMU_ATTR_DEV_CFG,
+ TCMU_ATTR_DEV_SIZE,
+ TCMU_ATTR_WRITECACHE,
+ TCMU_ATTR_CMD_STATUS,
+ TCMU_ATTR_DEVICE_ID,
+ TCMU_ATTR_SUPP_KERN_CMD_REPLY,
__TCMU_ATTR_MAX,
};
#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)