Diffstat (limited to 'include')
-rw-r--r--  include/Kbuild | 9
-rw-r--r--  include/asm-generic/bug.h | 53
-rw-r--r--  include/asm-generic/export.h | 15
-rw-r--r--  include/asm-generic/pgalloc.h | 13
-rw-r--r--  include/asm-generic/pgtable.h | 7
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 8
-rw-r--r--  include/crypto/pkcs7.h | 4
-rw-r--r--  include/drm/amd_asic_type.h | 4
-rw-r--r--  include/drm/bridge/analogix_dp.h | 4
-rw-r--r--  include/drm/bridge/dw_hdmi.h | 2
-rw-r--r--  include/drm/drmP.h | 2
-rw-r--r--  include/drm/drm_agpsupport.h | 14
-rw-r--r--  include/drm/drm_connector.h | 32
-rw-r--r--  include/drm/drm_crtc.h | 14
-rw-r--r--  include/drm/drm_dp_helper.h | 4
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 11
-rw-r--r--  include/drm/drm_drv.h | 104
-rw-r--r--  include/drm/drm_gem.h | 26
-rw-r--r--  include/drm/drm_gem_framebuffer_helper.h | 7
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 15
-rw-r--r--  include/drm/drm_gem_vram_helper.h | 30
-rw-r--r--  include/drm/drm_hdcp.h | 9
-rw-r--r--  include/drm/drm_ioctl.h | 3
-rw-r--r--  include/drm/drm_mipi_dbi.h | 188
-rw-r--r--  include/drm/drm_mode_config.h | 6
-rw-r--r--  include/drm/drm_panel.h | 184
-rw-r--r--  include/drm/drm_prime.h | 41
-rw-r--r--  include/drm/drm_self_refresh_helper.h | 6
-rw-r--r--  include/drm/drm_sysfs.h | 5
-rw-r--r--  include/drm/drm_vblank.h | 1
-rw-r--r--  include/drm/drm_vram_mm_helper.h | 2
-rw-r--r--  include/drm/i915_component.h | 2
-rw-r--r--  include/drm/i915_drm.h | 13
-rw-r--r--  include/drm/i915_pciids.h | 18
-rw-r--r--  include/drm/tinydrm/mipi-dbi.h | 117
-rw-r--r--  include/drm/tinydrm/tinydrm-helpers.h | 75
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 41
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 26
-rw-r--r--  include/dt-bindings/bus/ti-sysc.h | 1
-rw-r--r--  include/dt-bindings/clock/ast2600-clock.h | 113
-rw-r--r--  include/dt-bindings/clock/bcm2835.h | 2
-rw-r--r--  include/dt-bindings/clock/imx8mn-clock.h | 3
-rw-r--r--  include/dt-bindings/clock/ingenic,tcu.h | 20
-rw-r--r--  include/dt-bindings/clock/jz4740-cgu.h | 1
-rw-r--r--  include/dt-bindings/clock/mt6779-clk.h | 436
-rw-r--r--  include/dt-bindings/clock/mt8183-clk.h | 4
-rw-r--r--  include/dt-bindings/clock/omap5.h | 3
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-qcs404.h | 3
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-sm8150.h | 243
-rw-r--r--  include/dt-bindings/clock/rk3308-cru.h | 387
-rw-r--r--  include/dt-bindings/clock/sun8i-v3s-ccu.h | 4
-rw-r--r--  include/dt-bindings/gce/mt8183-gce.h | 175
-rw-r--r--  include/dt-bindings/pinctrl/k3.h | 3
-rw-r--r--  include/dt-bindings/reset-controller/mt8183-resets.h | 81
-rw-r--r--  include/dt-bindings/reset/hisi,hi6220-resets.h | 7
-rw-r--r--  include/dt-bindings/reset/sun8i-v3s-ccu.h | 3
-rw-r--r--  include/linux/acpi.h | 6
-rw-r--r--  include/linux/amba/clcd-regs.h | 1
-rw-r--r--  include/linux/backlight.h | 8
-rw-r--r--  include/linux/blkdev.h | 6
-rw-r--r--  include/linux/ceph/libceph.h | 1
-rw-r--r--  include/linux/ceph/messenger.h | 1
-rw-r--r--  include/linux/ceph/mon_client.h | 1
-rw-r--r--  include/linux/ceph/osd_client.h | 2
-rw-r--r--  include/linux/clk-provider.h | 3
-rw-r--r--  include/linux/clk.h | 17
-rw-r--r--  include/linux/clk/clk-conf.h | 5
-rw-r--r--  include/linux/compaction.h | 22
-rw-r--r--  include/linux/compiler_types.h | 37
-rw-r--r--  include/linux/cpu.h | 2
-rw-r--r--  include/linux/cpumask.h | 14
-rw-r--r--  include/linux/crash_dump.h | 14
-rw-r--r--  include/linux/cred.h | 1
-rw-r--r--  include/linux/dma-buf.h | 4
-rw-r--r--  include/linux/dma-fence.h | 34
-rw-r--r--  include/linux/dma-mapping.h | 34
-rw-r--r--  include/linux/dma-noncoherent.h | 13
-rw-r--r--  include/linux/dma-resv.h (renamed from include/linux/reservation.h) | 186
-rw-r--r--  include/linux/export.h | 110
-rw-r--r--  include/linux/extable.h | 2
-rw-r--r--  include/linux/f2fs_fs.h | 10
-rw-r--r--  include/linux/fb.h | 7
-rw-r--r--  include/linux/fs.h | 42
-rw-r--r--  include/linux/fs_context.h | 5
-rw-r--r--  include/linux/fsnotify_backend.h | 2
-rw-r--r--  include/linux/gfp.h | 12
-rw-r--r--  include/linux/hid.h | 43
-rw-r--r--  include/linux/hmm.h | 125
-rw-r--r--  include/linux/huge_mm.h | 9
-rw-r--r--  include/linux/hugetlb.h | 2
-rw-r--r--  include/linux/hyperv.h | 16
-rw-r--r--  include/linux/i2c.h | 2
-rw-r--r--  include/linux/iio/common/cros_ec_sensors_core.h | 3
-rw-r--r--  include/linux/ima.h | 9
-rw-r--r--  include/linux/input.h | 26
-rw-r--r--  include/linux/input/bu21013.h | 34
-rw-r--r--  include/linux/interval_tree_generic.h | 22
-rw-r--r--  include/linux/iomap.h | 10
-rw-r--r--  include/linux/ioport.h | 2
-rw-r--r--  include/linux/jbd2.h | 2
-rw-r--r--  include/linux/kernel.h | 23
-rw-r--r--  include/linux/kexec.h | 6
-rw-r--r--  include/linux/kgdb.h | 2
-rw-r--r--  include/linux/khugepaged.h | 12
-rw-r--r--  include/linux/lcd.h | 10
-rw-r--r--  include/linux/libnvdimm.h | 9
-rw-r--r--  include/linux/lsm_hooks.h | 22
-rw-r--r--  include/linux/mailbox/mtk-cmdq-mailbox.h | 3
-rw-r--r--  include/linux/mem_encrypt.h | 15
-rw-r--r--  include/linux/memcontrol.h | 23
-rw-r--r--  include/linux/memory.h | 7
-rw-r--r--  include/linux/mempolicy.h | 2
-rw-r--r--  include/linux/memremap.h | 4
-rw-r--r--  include/linux/mfd/cros_ec.h | 292
-rw-r--r--  include/linux/mfd/da9063/pdata.h | 60
-rw-r--r--  include/linux/mfd/intel_soc_pmic_mrfld.h | 81
-rw-r--r--  include/linux/mfd/mt6397/core.h | 11
-rw-r--r--  include/linux/mfd/syscon.h | 6
-rw-r--r--  include/linux/migrate.h | 120
-rw-r--r--  include/linux/mlx5/device.h | 9
-rw-r--r--  include/linux/mm.h | 87
-rw-r--r--  include/linux/mm_types.h | 21
-rw-r--r--  include/linux/mm_types_task.h | 4
-rw-r--r--  include/linux/mmc/host.h | 2
-rw-r--r--  include/linux/mmu_notifier.h | 59
-rw-r--r--  include/linux/mmzone.h | 14
-rw-r--r--  include/linux/module.h | 5
-rw-r--r--  include/linux/module_signature.h | 46
-rw-r--r--  include/linux/mtd/mtd.h | 3
-rw-r--r--  include/linux/mtd/nand.h | 2
-rw-r--r--  include/linux/mtd/sharpsl.h | 5
-rw-r--r--  include/linux/mtd/spi-nor.h | 291
-rw-r--r--  include/linux/mtd/super.h | 3
-rw-r--r--  include/linux/nfs_fs.h | 3
-rw-r--r--  include/linux/nvme-fc-driver.h | 2
-rw-r--r--  include/linux/page_ext.h | 1
-rw-r--r--  include/linux/pagemap.h | 10
-rw-r--r--  include/linux/pagewalk.h | 66
-rw-r--r--  include/linux/pci-aspm.h | 36
-rw-r--r--  include/linux/pci-p2pdma.h | 28
-rw-r--r--  include/linux/pci.h | 133
-rw-r--r--  include/linux/pci_hotplug.h | 100
-rw-r--r--  include/linux/pci_ids.h | 3
-rw-r--r--  include/linux/pinctrl/consumer.h | 6
-rw-r--r--  include/linux/platform_data/cros_ec_chardev.h | 38
-rw-r--r--  include/linux/platform_data/cros_ec_commands.h (renamed from include/linux/mfd/cros_ec_commands.h) | 12
-rw-r--r--  include/linux/platform_data/cros_ec_proto.h | 319
-rw-r--r--  include/linux/platform_data/keypad-w90p910.h | 16
-rw-r--r--  include/linux/platform_data/pinctrl-single.h | 6
-rw-r--r--  include/linux/platform_data/ti-sysc.h | 8
-rw-r--r--  include/linux/printk.h | 22
-rw-r--r--  include/linux/pwm.h | 4
-rw-r--r--  include/linux/qed/qed_rdma_if.h | 2
-rw-r--r--  include/linux/quicklist.h | 94
-rw-r--r--  include/linux/quotaops.h | 2
-rw-r--r--  include/linux/ramfs.h | 6
-rw-r--r--  include/linux/rbtree_augmented.h | 88
-rw-r--r--  include/linux/rcuwait.h | 20
-rw-r--r--  include/linux/root_dev.h | 1
-rw-r--r--  include/linux/sched.h | 14
-rw-r--r--  include/linux/sched/mm.h | 10
-rw-r--r--  include/linux/sched/task.h | 2
-rw-r--r--  include/linux/security.h | 69
-rw-r--r--  include/linux/shmem_fs.h | 3
-rw-r--r--  include/linux/shrinker.h | 7
-rw-r--r--  include/linux/slab.h | 62
-rw-r--r--  include/linux/soc/amlogic/meson-canvas.h | 1
-rw-r--r--  include/linux/soc/mediatek/mtk-cmdq.h | 3
-rw-r--r--  include/linux/soundwire/sdw.h | 20
-rw-r--r--  include/linux/soundwire/sdw_intel.h | 1
-rw-r--r--  include/linux/string.h | 5
-rw-r--r--  include/linux/sunrpc/cache.h | 7
-rw-r--r--  include/linux/sunrpc/sched.h | 3
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 6
-rw-r--r--  include/linux/sunrpc/xdr.h | 2
-rw-r--r--  include/linux/sunrpc/xprt.h | 1
-rw-r--r--  include/linux/sunrpc/xprtrdma.h | 4
-rw-r--r--  include/linux/swap.h | 2
-rw-r--r--  include/linux/t10-pi.h | 14
-rw-r--r--  include/linux/thread_info.h | 2
-rw-r--r--  include/linux/time64.h | 2
-rw-r--r--  include/linux/uaccess.h | 21
-rw-r--r--  include/linux/verification.h | 10
-rw-r--r--  include/linux/vermagic.h | 2
-rw-r--r--  include/linux/vmalloc.h | 22
-rw-r--r--  include/linux/zpool.h | 3
-rw-r--r--  include/rdma/ib.h | 2
-rw-r--r--  include/rdma/ib_umem.h | 2
-rw-r--r--  include/rdma/ib_umem_odp.h | 58
-rw-r--r--  include/rdma/ib_verbs.h | 81
-rw-r--r--  include/rdma/iw_portmap.h | 3
-rw-r--r--  include/rdma/opa_port_info.h | 2
-rw-r--r--  include/rdma/rdma_netlink.h | 10
-rw-r--r--  include/rdma/rdma_vt.h | 1
-rw-r--r--  include/rdma/rdmavt_cq.h | 1
-rw-r--r--  include/rdma/rdmavt_qp.h | 35
-rw-r--r--  include/rdma/signature.h | 2
-rw-r--r--  include/scsi/scsi_cmnd.h | 1
-rw-r--r--  include/scsi/scsi_dbg.h | 2
-rw-r--r--  include/scsi/scsi_host.h | 16
-rw-r--r--  include/trace/events/rpcrdma.h | 88
-rw-r--r--  include/trace/events/writeback.h | 38
-rw-r--r--  include/uapi/asm-generic/mman-common.h | 3
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 4
-rw-r--r--  include/uapi/drm/drm_mode.h | 1
-rw-r--r--  include/uapi/drm/etnaviv_drm.h | 10
-rw-r--r--  include/uapi/drm/i915_drm.h | 1
-rw-r--r--  include/uapi/drm/panfrost_drm.h | 64
-rw-r--r--  include/uapi/linux/coff.h | 5
-rw-r--r--  include/uapi/linux/dm-ioctl.h | 6
-rw-r--r--  include/uapi/linux/fs.h | 1
-rw-r--r--  include/uapi/linux/fuse.h | 12
-rw-r--r--  include/uapi/linux/io_uring.h | 2
-rw-r--r--  include/uapi/linux/kvm.h | 2
-rw-r--r--  include/uapi/linux/media-bus-format.h | 3
-rw-r--r--  include/uapi/linux/nfsd/cld.h | 41
-rw-r--r--  include/uapi/linux/pci_regs.h | 15
-rw-r--r--  include/uapi/linux/serio.h | 1
-rw-r--r--  include/uapi/linux/vfio.h | 71
-rw-r--r--  include/uapi/linux/virtio_fs.h | 19
-rw-r--r--  include/uapi/linux/virtio_ids.h | 1
-rw-r--r--  include/uapi/rdma/mlx5_user_ioctl_verbs.h | 1
-rw-r--r--  include/uapi/scsi/scsi_bsg_fc.h | 54
-rw-r--r--  include/uapi/scsi/scsi_netlink.h | 20
-rw-r--r--  include/uapi/scsi/scsi_netlink_fc.h | 17
-rw-r--r--  include/xen/arm/hypervisor.h | 2
-rw-r--r--  include/xen/arm/page-coherent.h | 24
-rw-r--r--  include/xen/swiotlb-xen.h | 5
228 files changed, 4599 insertions, 2222 deletions
diff --git a/include/Kbuild b/include/Kbuild
index bfbe7adc261b..ffba79483cc5 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -310,10 +310,8 @@ header-test- += linux/mfd/adp5520.h
header-test- += linux/mfd/arizona/pdata.h
header-test- += linux/mfd/as3711.h
header-test- += linux/mfd/as3722.h
-header-test- += linux/mfd/cros_ec_commands.h
header-test- += linux/mfd/da903x.h
header-test- += linux/mfd/da9055/pdata.h
-header-test- += linux/mfd/da9063/pdata.h
header-test- += linux/mfd/db8500-prcmu.h
header-test- += linux/mfd/dbx500-prcmu.h
header-test- += linux/mfd/dln2.h
@@ -455,6 +453,7 @@ header-test- += linux/platform_data/ata-pxa.h
header-test- += linux/platform_data/atmel.h
header-test- += linux/platform_data/bh1770glc.h
header-test- += linux/platform_data/brcmfmac.h
+header-test- += linux/platform_data/cros_ec_commands.h
header-test- += linux/platform_data/clk-u300.h
header-test- += linux/platform_data/cyttsp4.h
header-test- += linux/platform_data/dma-coh901318.h
@@ -881,12 +880,6 @@ header-test- += net/xdp.h
header-test- += net/xdp_priv.h
header-test- += pcmcia/cistpl.h
header-test- += pcmcia/ds.h
-header-test- += rdma/ib.h
-header-test- += rdma/iw_portmap.h
-header-test- += rdma/opa_port_info.h
-header-test- += rdma/rdmavt_cq.h
-header-test- += rdma/restrack.h
-header-test- += rdma/signature.h
header-test- += rdma/tid_rdma_defs.h
header-test- += scsi/fc/fc_encaps.h
header-test- += scsi/fc/fc_fc2.h
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7357a3c942a0..384b5c835ced 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -10,6 +10,7 @@
#define BUGFLAG_WARNING (1 << 0)
#define BUGFLAG_ONCE (1 << 1)
#define BUGFLAG_DONE (1 << 2)
+#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */
#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
@@ -61,18 +62,6 @@ struct bug_entry {
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
#endif
-#ifdef __WARN_FLAGS
-#define __WARN_TAINT(taint) __WARN_FLAGS(BUGFLAG_TAINT(taint))
-#define __WARN_ONCE_TAINT(taint) __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
-
-#define WARN_ON_ONCE(condition) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_ONCE_TAINT(TAINT_WARN); \
- unlikely(__ret_warn_on); \
-})
-#endif
-
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
* significant kernel issues that need prompt attention if they should ever
@@ -89,27 +78,27 @@ struct bug_entry {
*
* Use the versions with printk format strings to provide better diagnostics.
*/
-#ifndef __WARN_TAINT
-extern __printf(3, 4)
-void warn_slowpath_fmt(const char *file, const int line,
- const char *fmt, ...);
+#ifndef __WARN_FLAGS
extern __printf(4, 5)
-void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
- const char *fmt, ...);
-extern void warn_slowpath_null(const char *file, const int line);
-#define WANT_WARN_ON_SLOWPATH
-#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
-#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
-#define __WARN_printf_taint(taint, arg...) \
- warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
+void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
+ const char *fmt, ...);
+#define __WARN() __WARN_printf(TAINT_WARN, NULL)
+#define __WARN_printf(taint, arg...) \
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#define __WARN() do { \
- printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
-} while (0)
-#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
-#define __WARN_printf_taint(taint, arg...) \
- do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
+#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
+#define __WARN_printf(taint, arg...) do { \
+ __warn_printk(arg); \
+ __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ } while (0)
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(BUGFLAG_ONCE | \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
#endif
/* used internally by panic.c */
@@ -132,7 +121,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- __WARN_printf(format); \
+ __WARN_printf(TAINT_WARN, format); \
unlikely(__ret_warn_on); \
})
#endif
@@ -140,7 +129,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#define WARN_TAINT(condition, taint, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- __WARN_printf_taint(taint, format); \
+ __WARN_printf(taint, format); \
unlikely(__ret_warn_on); \
})
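
The reworked warning path above funnels both the formatted and the plain variants through a single warn_slowpath_fmt()/__warn_printk() implementation. A minimal usage sketch follows; the function and variable names are hypothetical, not part of the patch:

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static int example_check_fifo(unsigned int level, unsigned int depth)
{
	/* One-shot, formatted warning; taints the kernel with TAINT_WARN. */
	if (WARN_ONCE(level > depth, "fifo level %u exceeds depth %u\n",
		      level, depth))
		return -EIO;

	/* Plain predicate check without a format string. */
	WARN_ON_ONCE(depth == 0);

	return 0;
}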
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 294d6ae785d4..fa577978fbbd 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -4,26 +4,24 @@
#ifndef KSYM_FUNC
#define KSYM_FUNC(x) x
#endif
-#ifdef CONFIG_64BIT
-#ifndef KSYM_ALIGN
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#define KSYM_ALIGN 4
+#elif defined(CONFIG_64BIT)
#define KSYM_ALIGN 8
-#endif
#else
-#ifndef KSYM_ALIGN
#define KSYM_ALIGN 4
#endif
-#endif
#ifndef KCRC_ALIGN
#define KCRC_ALIGN 4
#endif
.macro __put, val, name
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
- .long \val - ., \name - .
+ .long \val - ., \name - ., 0
#elif defined(CONFIG_64BIT)
- .quad \val, \name
+ .quad \val, \name, 0
#else
- .long \val, \name
+ .long \val, \name, 0
#endif
.endm
@@ -57,7 +55,6 @@ __kcrctab_\name:
#endif
#endif
.endm
-#undef __put
#if defined(CONFIG_TRIM_UNUSED_KSYMS)
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 8476175c07e7..73f7421413cb 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -49,7 +49,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
* @mm: the mm_struct of the current context
* @gfp: GFP flags to use for the allocation
*
- * Allocates a page and runs the pgtable_page_ctor().
+ * Allocates a page and runs the pgtable_pte_page_ctor().
*
* This function is intended for architectures that need
* anything beyond simple page allocation or must have custom GFP flags.
@@ -63,7 +63,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
pte = alloc_page(gfp);
if (!pte)
return NULL;
- if (!pgtable_page_ctor(pte)) {
+ if (!pgtable_pte_page_ctor(pte)) {
__free_page(pte);
return NULL;
}
@@ -76,7 +76,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
* pte_alloc_one - allocate a page for PTE-level user page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_page_ctor().
+ * Allocates a page and runs the pgtable_pte_page_ctor().
*
* Return: `struct page` initialized as page table or %NULL on error
*/
@@ -98,15 +98,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
*/
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
- pgtable_page_dtor(pte_page);
+ pgtable_pte_page_dtor(pte_page);
__free_page(pte_page);
}
-#else /* CONFIG_MMU */
-
-/* This is enough for a nommu architecture */
-#define check_pgt_cache() do { } while (0)
-
#endif /* CONFIG_MMU */
#endif /* __ASM_GENERIC_PGALLOC_H */
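
As the kernel-doc above notes, __pte_alloc_one() is meant for architectures that need custom GFP flags on top of the generic allocation plus pgtable_pte_page_ctor() call. A hedged sketch of such a wrapper; the chosen flags are purely illustrative:

#include <linux/gfp.h>
#include <asm-generic/pgalloc.h>	/* normally reached via <asm/pgalloc.h> */

static inline pgtable_t example_pte_alloc_one(struct mm_struct *mm)
{
	/* Zeroed, memcg-accounted page for a user page table. */
	return __pte_alloc_one(mm, GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
}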
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 75d9d68a6de7..818691846c90 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1002,9 +1002,8 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
* need this). If THP is not enabled, the pmd can't go away under the
* code even if MADV_DONTNEED runs, but if THP is enabled we need to
* run a pmd_trans_unstable before walking the ptes after
- * split_huge_page_pmd returns (because it may have run when the pmd
- * become null, but then a page fault can map in a THP and not a
- * regular page).
+ * split_huge_pmd returns (because it may have run when the pmd become
+ * null, but then a page fault can map in a THP and not a regular page).
*/
static inline int pmd_trans_unstable(pmd_t *pmd)
{
@@ -1126,7 +1125,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
static inline void init_espfix_bsp(void) { }
#endif
-extern void __init pgd_cache_init(void);
+extern void __init pgtable_cache_init(void);
#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index cd28f63bfbc7..dae64600ccbf 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -215,8 +215,13 @@
__start_lsm_info = .; \
KEEP(*(.lsm_info.init)) \
__end_lsm_info = .;
+#define EARLY_LSM_TABLE() . = ALIGN(8); \
+ __start_early_lsm_info = .; \
+ KEEP(*(.early_lsm_info.init)) \
+ __end_early_lsm_info = .;
#else
#define LSM_TABLE()
+#define EARLY_LSM_TABLE()
#endif
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
@@ -627,7 +632,8 @@
ACPI_PROBE_TABLE(timer) \
THERMAL_TABLE(governor) \
EARLYCON_TABLE() \
- LSM_TABLE()
+ LSM_TABLE() \
+ EARLY_LSM_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h
index 96071bee03ac..38ec7f5f9041 100644
--- a/include/crypto/pkcs7.h
+++ b/include/crypto/pkcs7.h
@@ -9,6 +9,7 @@
#define _CRYPTO_PKCS7_H
#include <linux/verification.h>
+#include <linux/hash_info.h>
#include <crypto/public_key.h>
struct key;
@@ -40,4 +41,7 @@ extern int pkcs7_verify(struct pkcs7_message *pkcs7,
extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
const void *data, size_t datalen);
+extern int pkcs7_get_digest(struct pkcs7_message *pkcs7, const u8 **buf,
+ u32 *len, enum hash_algo *hash_algo);
+
#endif /* _CRYPTO_PKCS7_H */
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index bcc2bcf32886..296aab724677 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -49,7 +49,11 @@ enum amd_asic_type {
CHIP_VEGA12,
CHIP_VEGA20,
CHIP_RAVEN,
+ CHIP_ARCTURUS,
+ CHIP_RENOIR,
CHIP_NAVI10,
+ CHIP_NAVI14,
+ CHIP_NAVI12,
CHIP_LAST,
};
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index e56046cf4d04..7aa2f93da49c 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -38,10 +38,6 @@ struct analogix_dp_plat_data {
struct drm_connector *);
};
-int analogix_dp_psr_enabled(struct analogix_dp_device *dp);
-int analogix_dp_enable_psr(struct analogix_dp_device *dp);
-int analogix_dp_disable_psr(struct analogix_dp_device *dp);
-
int analogix_dp_resume(struct analogix_dp_device *dp);
int analogix_dp_suspend(struct analogix_dp_device *dp);
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index c402364aec0d..cf528c289857 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -155,6 +155,8 @@ void dw_hdmi_resume(struct dw_hdmi *hdmi);
void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
+void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
+void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca);
void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 94aae87b1138..037b1f7a87a5 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -87,7 +87,7 @@ struct module;
struct device_node;
struct videomode;
-struct reservation_object;
+struct dma_resv;
struct dma_buf_attachment;
struct pci_dev;
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
index b05e462276d5..664e120b93e6 100644
--- a/include/drm/drm_agpsupport.h
+++ b/include/drm/drm_agpsupport.h
@@ -31,11 +31,6 @@ struct drm_agp_head {
void drm_free_agp(struct agp_memory * handle, int pages);
int drm_bind_agp(struct agp_memory * handle, unsigned int start);
int drm_unbind_agp(struct agp_memory * handle);
-struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
- struct page **pages,
- unsigned long num_pages,
- uint32_t gtt_offset,
- uint32_t type);
struct drm_agp_head *drm_agp_init(struct drm_device *dev);
void drm_legacy_agp_clear(struct drm_device *dev);
@@ -80,15 +75,6 @@ static inline int drm_unbind_agp(struct agp_memory * handle)
return -ENODEV;
}
-static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
- struct page **pages,
- unsigned long num_pages,
- uint32_t gtt_offset,
- uint32_t type)
-{
- return NULL;
-}
-
static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
return NULL;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index ca745d9feaf5..681cb590f952 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -41,6 +41,7 @@ struct drm_property;
struct drm_property_blob;
struct drm_printer;
struct edid;
+struct i2c_adapter;
enum drm_connector_force {
DRM_FORCE_UNSPECIFIED,
@@ -323,6 +324,8 @@ enum drm_panel_orientation {
* edge of the pixel clock
* @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE: Sync signals are sampled on the falling
* edge of the pixel clock
+ * @DRM_BUS_FLAG_SHARP_SIGNALS: Set if the Sharp-specific signals
+ * (SPL, CLS, PS, REV) must be used
*/
enum drm_bus_flags {
DRM_BUS_FLAG_DE_LOW = BIT(0),
@@ -341,6 +344,7 @@ enum drm_bus_flags {
DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE,
DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE,
DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_POSEDGE,
+ DRM_BUS_FLAG_SHARP_SIGNALS = BIT(8),
};
/**
@@ -539,8 +543,8 @@ struct drm_connector_state {
*
* This is also used in the atomic helpers to map encoders to their
* current and previous connectors, see
- * &drm_atomic_get_old_connector_for_encoder() and
- * &drm_atomic_get_new_connector_for_encoder().
+ * drm_atomic_get_old_connector_for_encoder() and
+ * drm_atomic_get_new_connector_for_encoder().
*
* NOTE: Atomic drivers must fill this out (either themselves or through
* helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will
@@ -599,6 +603,12 @@ struct drm_connector_state {
unsigned int content_type;
/**
+ * @hdcp_content_type: Connector property to pass the type of
+ * protected content. This is most commonly used for HDCP.
+ */
+ unsigned int hdcp_content_type;
+
+ /**
* @scaling_mode: Connector property to control the
* upscaling, mostly used for built-in panels.
*/
@@ -1308,6 +1318,18 @@ struct drm_connector {
* [0]: progressive, [1]: interlaced
*/
int audio_latency[2];
+
+ /**
+ * @ddc: associated ddc adapter.
+	 * A connector usually has an associated DDC adapter. If a driver uses
+	 * this field, an appropriate symbolic link is created in the connector's
+	 * sysfs directory to make it easy for the user to tell which i2c
+	 * adapter belongs to a particular display.
+ *
+ * The field should be set by calling drm_connector_init_with_ddc().
+ */
+ struct i2c_adapter *ddc;
+
/**
* @null_edid_counter: track sinks that give us all zeros for the EDID.
* Needed to workaround some HW bugs where we get all 0s
@@ -1396,6 +1418,11 @@ int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
+int drm_connector_init_with_ddc(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc);
void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
@@ -1481,6 +1508,7 @@ const char *drm_get_dvi_i_select_name(int val);
const char *drm_get_tv_subconnector_name(int val);
const char *drm_get_tv_select_name(int val);
const char *drm_get_content_protection_name(int val);
+const char *drm_get_hdcp_content_type_name(int val);
int drm_mode_create_dvi_i_properties(struct drm_device *dev);
int drm_mode_create_tv_margin_properties(struct drm_device *dev);
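
A hedged sketch of how a driver would use the new drm_connector_init_with_ddc() so the ddc sysfs symlink gets created; the funcs table contents and the surrounding driver code are assumptions, not part of this patch:

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>

static const struct drm_connector_funcs example_connector_funcs = {
	/* .detect, .fill_modes, ... are assumed to be filled in elsewhere. */
	.destroy = drm_connector_cleanup,
};

static int example_register_hdmi_connector(struct drm_device *dev,
					    struct drm_connector *connector,
					    struct i2c_adapter *ddc)
{
	/* Passing the adapter sets connector->ddc and creates the symlink. */
	return drm_connector_init_with_ddc(dev, connector,
					   &example_connector_funcs,
					   DRM_MODE_CONNECTOR_HDMIA, ddc);
}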
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 128d8b210621..408b6f4e63c0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -285,12 +285,12 @@ struct drm_crtc_state {
u32 target_vblank;
/**
- * @pageflip_flags:
+ * @async_flip:
*
- * DRM_MODE_PAGE_FLIP_* flags, as passed to the page flip ioctl.
- * Zero in any other case.
+ * This is set when DRM_MODE_PAGE_FLIP_ASYNC is set in the legacy
+ * PAGE_FLIP IOCTL. It's not wired up for the atomic IOCTL itself yet.
*/
- u32 pageflip_flags;
+ bool async_flip;
/**
* @vrr_enabled:
@@ -756,6 +756,9 @@ struct drm_crtc_funcs {
* provided from the configured source. Drivers must accept an "auto"
* source name that will select a default source for this CRTC.
*
+ * This may trigger an atomic modeset commit if necessary, to enable CRC
+ * generation.
+ *
* Note that "auto" can depend upon the current modeset configuration,
* e.g. it could pick an encoder or output specific CRC sampling point.
*
@@ -767,6 +770,7 @@ struct drm_crtc_funcs {
* 0 on success or a negative error code on failure.
*/
int (*set_crc_source)(struct drm_crtc *crtc, const char *source);
+
/**
* @verify_crc_source:
*
@@ -1104,7 +1108,7 @@ struct drm_crtc {
/**
* @self_refresh_data: Holds the state for the self refresh helpers
*
- * Initialized via drm_self_refresh_helper_register().
+ * Initialized via drm_self_refresh_helper_init().
*/
struct drm_self_refresh_data *self_refresh_data;
};
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 397896b5b21a..8364502f92cf 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1309,6 +1309,10 @@ struct drm_dp_aux {
* @cec: struct containing fields used for CEC-Tunneling-over-AUX.
*/
struct drm_dp_aux_cec cec;
+ /**
+ * @is_remote: Is this AUX CH actually using sideband messaging.
+ */
+ bool is_remote;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 8c97a5f92c47..2ba6253ea6d3 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -643,6 +643,17 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
+
+ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
+ unsigned int offset, void *buffer, size_t size);
+ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
+ unsigned int offset, void *buffer, size_t size);
+
+int drm_dp_mst_connector_late_register(struct drm_connector *connector,
+ struct drm_dp_mst_port *port);
+void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
+ struct drm_dp_mst_port *port);
+
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
int __must_check
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 68ca736c548d..8976afe48c1c 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -62,12 +62,6 @@ enum drm_driver_feature {
*/
DRIVER_MODESET = BIT(1),
/**
- * @DRIVER_PRIME:
- *
- * Driver implements DRM PRIME buffer sharing.
- */
- DRIVER_PRIME = BIT(2),
- /**
* @DRIVER_RENDER:
*
* Driver supports dedicated render nodes. See also the :ref:`section on
@@ -502,21 +496,25 @@ struct drm_driver {
* @gem_free_object: deconstructor for drm_gem_objects
*
* This is deprecated and should not be used by new drivers. Use
- * @gem_free_object_unlocked instead.
+ * &drm_gem_object_funcs.free instead.
*/
void (*gem_free_object) (struct drm_gem_object *obj);
/**
* @gem_free_object_unlocked: deconstructor for drm_gem_objects
*
- * This is for drivers which are not encumbered with &drm_device.struct_mutex
- * legacy locking schemes. Use this hook instead of @gem_free_object.
+ * This is deprecated and should not be used by new drivers. Use
+ * &drm_gem_object_funcs.free instead.
+ * Compared to @gem_free_object this is not encumbered with
+ * &drm_device.struct_mutex legacy locking schemes.
*/
void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
/**
* @gem_open_object:
*
+ * This callback is deprecated in favour of &drm_gem_object_funcs.open.
+ *
* Driver hook called upon gem handle creation
*/
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
@@ -524,6 +522,8 @@ struct drm_driver {
/**
* @gem_close_object:
*
+ * This callback is deprecated in favour of &drm_gem_object_funcs.close.
+ *
* Driver hook called upon gem handle release
*/
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
@@ -531,6 +531,9 @@ struct drm_driver {
/**
* @gem_print_info:
*
+ * This callback is deprecated in favour of
+ * &drm_gem_object_funcs.print_info.
+ *
* If driver subclasses struct &drm_gem_object, it can implement this
* optional hook for printing additional driver specific info.
*
@@ -545,56 +548,108 @@ struct drm_driver {
/**
* @gem_create_object: constructor for gem objects
*
- * Hook for allocating the GEM object struct, for use by core
- * helpers.
+ * Hook for allocating the GEM object struct, for use by the CMA and
+ * SHMEM GEM helpers.
*/
struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
size_t size);
-
- /* prime: */
/**
* @prime_handle_to_fd:
*
- * export handle -> fd (see drm_gem_prime_handle_to_fd() helper)
+ * Main PRIME export function. Should be implemented with
+ * drm_gem_prime_handle_to_fd() for GEM based drivers.
+ *
+ * For an in-depth discussion see :ref:`PRIME buffer sharing
+ * documentation <prime_buffer_sharing>`.
*/
int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
uint32_t handle, uint32_t flags, int *prime_fd);
/**
* @prime_fd_to_handle:
*
- * import fd -> handle (see drm_gem_prime_fd_to_handle() helper)
+ * Main PRIME import function. Should be implemented with
+ * drm_gem_prime_fd_to_handle() for GEM based drivers.
+ *
+ * For an in-depth discussion see :ref:`PRIME buffer sharing
+ * documentation <prime_buffer_sharing>`.
*/
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
/**
* @gem_prime_export:
*
- * export GEM -> dmabuf
- *
- * This defaults to drm_gem_prime_export() if not set.
+ * Export hook for GEM drivers. Deprecated in favour of
+ * &drm_gem_object_funcs.export.
*/
- struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
+ struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj,
+ int flags);
/**
* @gem_prime_import:
*
- * import dmabuf -> GEM
+ * Import hook for GEM drivers.
*
* This defaults to drm_gem_prime_import() if not set.
*/
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
+
+ /**
+ * @gem_prime_pin:
+ *
+ * Deprecated hook in favour of &drm_gem_object_funcs.pin.
+ */
int (*gem_prime_pin)(struct drm_gem_object *obj);
+
+ /**
+ * @gem_prime_unpin:
+ *
+ * Deprecated hook in favour of &drm_gem_object_funcs.unpin.
+ */
void (*gem_prime_unpin)(struct drm_gem_object *obj);
- struct reservation_object * (*gem_prime_res_obj)(
- struct drm_gem_object *obj);
+
+
+ /**
+ * @gem_prime_get_sg_table:
+ *
+ * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table.
+ */
struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
+
+ /**
+ * @gem_prime_import_sg_table:
+ *
+ * Optional hook used by the PRIME helper functions
+	 * drm_gem_prime_import() and drm_gem_prime_import_dev().
+ */
struct drm_gem_object *(*gem_prime_import_sg_table)(
struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
+ /**
+ * @gem_prime_vmap:
+ *
+ * Deprecated vmap hook for GEM drivers. Please use
+ * &drm_gem_object_funcs.vmap instead.
+ */
void *(*gem_prime_vmap)(struct drm_gem_object *obj);
+
+ /**
+ * @gem_prime_vunmap:
+ *
+ * Deprecated vunmap hook for GEM drivers. Please use
+ * &drm_gem_object_funcs.vunmap instead.
+ */
void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
+
+ /**
+ * @gem_prime_mmap:
+ *
+ * mmap hook for GEM drivers, used to implement dma-buf mmap in the
+ * PRIME helpers.
+ *
+ * FIXME: There's way too much duplication going on here, and also moved
+ * to &drm_gem_object_funcs.
+ */
int (*gem_prime_mmap)(struct drm_gem_object *obj,
struct vm_area_struct *vma);
@@ -662,6 +717,9 @@ struct drm_driver {
/**
* @gem_vm_ops: Driver private ops for this object
+ *
+ * For GEM drivers this is deprecated in favour of
+ * &drm_gem_object_funcs.vm_ops.
*/
const struct vm_operations_struct *gem_vm_ops;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index a9121fe66ea2..6aaba14f5972 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,7 +35,7 @@
*/
#include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <drm/drm_vma_manager.h>
@@ -101,7 +101,7 @@ struct drm_gem_object_funcs {
/**
* @pin:
*
- * Pin backing buffer in memory.
+ * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper.
*
* This callback is optional.
*/
@@ -110,7 +110,7 @@ struct drm_gem_object_funcs {
/**
* @unpin:
*
- * Unpin backing buffer.
+ * Unpin backing buffer. Used by the drm_gem_map_detach() helper.
*
* This callback is optional.
*/
@@ -120,16 +120,21 @@ struct drm_gem_object_funcs {
* @get_sg_table:
*
* Returns a Scatter-Gather table representation of the buffer.
- * Used when exporting a buffer.
+ * Used when exporting a buffer by the drm_gem_map_dma_buf() helper.
+ * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table()
+	 * in drm_gem_unmap_dma_buf(), therefore these helpers and this callback
+ * here cannot be used for sg tables pointing at driver private memory
+ * ranges.
*
- * This callback is mandatory if buffer export is supported.
+ * See also drm_prime_pages_to_sg().
*/
struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);
/**
* @vmap:
*
- * Returns a virtual address for the buffer.
+ * Returns a virtual address for the buffer. Used by the
+ * drm_gem_dmabuf_vmap() helper.
*
* This callback is optional.
*/
@@ -138,7 +143,8 @@ struct drm_gem_object_funcs {
/**
* @vunmap:
*
- * Releases the the address previously returned by @vmap.
+	 * Releases the address previously returned by @vmap. Used by the
+ * drm_gem_dmabuf_vunmap() helper.
*
* This callback is optional.
*/
@@ -270,7 +276,7 @@ struct drm_gem_object {
*
* Normally (@resv == &@_resv) except for imported GEM objects.
*/
- struct reservation_object *resv;
+ struct dma_resv *resv;
/**
* @_resv:
@@ -279,7 +285,7 @@ struct drm_gem_object {
*
* This is unused for imported GEM objects.
*/
- struct reservation_object _resv;
+ struct dma_resv _resv;
/**
* @funcs:
@@ -384,7 +390,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
-long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout);
int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index 7f307e834eef..d9f13fd25b0a 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -33,11 +33,4 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state);
int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
-
-struct drm_framebuffer *
-drm_gem_fbdev_fb_create(struct drm_device *dev,
- struct drm_fb_helper_surface_size *sizes,
- unsigned int pitch_align, struct drm_gem_object *obj,
- const struct drm_framebuffer_funcs *funcs);
-
#endif
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 038b6d313447..01f514521687 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -44,6 +44,9 @@ struct drm_gem_shmem_object {
*/
unsigned int pages_use_count;
+ int madv;
+ struct list_head madv_list;
+
/**
* @pages_mark_dirty_on_put:
*
@@ -121,6 +124,18 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj);
void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
+int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
+
+static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
+{
+ return (shmem->madv > 0) &&
+ !shmem->vmap_use_count && shmem->sgt &&
+ !shmem->base.dma_buf && !shmem->base.import_attach;
+}
+
+void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
+bool drm_gem_shmem_purge(struct drm_gem_object *obj);
+
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
struct drm_device *dev, size_t size,
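
The new madvise/purge hooks above let a driver tie shmem-backed objects into a memory shrinker. A hedged sketch of such a scan; the example_device structure, its list and its lock are hypothetical driver state, not part of the helper:

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <drm/drm_gem_shmem_helper.h>

struct example_device {
	struct mutex shrink_lock;
	struct list_head shrink_list;	/* of drm_gem_shmem_object.madv_list */
};

static unsigned long example_shrinker_scan(struct example_device *ex_dev)
{
	struct drm_gem_shmem_object *shmem, *tmp;
	unsigned long freed = 0;

	mutex_lock(&ex_dev->shrink_lock);
	list_for_each_entry_safe(shmem, tmp, &ex_dev->shrink_list, madv_list) {
		/* Only objects userspace marked as don't-need are eligible. */
		if (!drm_gem_shmem_is_purgeable(shmem))
			continue;
		if (drm_gem_shmem_purge(&shmem->base)) {
			freed += shmem->base.size >> PAGE_SHIFT;
			list_del_init(&shmem->madv_list);
		}
	}
	mutex_unlock(&ex_dev->shrink_lock);

	return freed;
}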
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 9581ea0a4f7e..ac217d768456 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -36,7 +36,6 @@ struct vm_area_struct;
* video memory becomes scarce.
*/
struct drm_gem_vram_object {
- struct drm_gem_object gem;
struct ttm_buffer_object bo;
struct ttm_bo_kmap_obj kmap;
@@ -68,7 +67,7 @@ static inline struct drm_gem_vram_object *drm_gem_vram_of_bo(
static inline struct drm_gem_vram_object *drm_gem_vram_of_gem(
struct drm_gem_object *gem)
{
- return container_of(gem, struct drm_gem_vram_object, gem);
+ return container_of(gem, struct drm_gem_vram_object, bo.base);
}
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
@@ -108,7 +107,6 @@ extern const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs;
* Helpers for struct drm_driver
*/
-void drm_gem_vram_driver_gem_free_object_unlocked(struct drm_gem_object *gem);
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -124,30 +122,8 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
* &struct drm_driver with default functions.
*/
#define DRM_GEM_VRAM_DRIVER \
- .gem_free_object_unlocked = \
- drm_gem_vram_driver_gem_free_object_unlocked, \
.dumb_create = drm_gem_vram_driver_dumb_create, \
- .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset
-
-/*
- * PRIME helpers for struct drm_driver
- */
-
-int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *obj);
-void drm_gem_vram_driver_gem_prime_unpin(struct drm_gem_object *obj);
-void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *obj);
-void drm_gem_vram_driver_gem_prime_vunmap(struct drm_gem_object *obj,
- void *vaddr);
-int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-
-#define DRM_GEM_VRAM_DRIVER_PRIME \
- .gem_prime_export = drm_gem_prime_export, \
- .gem_prime_import = drm_gem_prime_import, \
- .gem_prime_pin = drm_gem_vram_driver_gem_prime_pin, \
- .gem_prime_unpin = drm_gem_vram_driver_gem_prime_unpin, \
- .gem_prime_vmap = drm_gem_vram_driver_gem_prime_vmap, \
- .gem_prime_vunmap = drm_gem_vram_driver_gem_prime_vunmap, \
- .gem_prime_mmap = drm_gem_vram_driver_gem_prime_mmap
+ .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \
+ .gem_prime_mmap = drm_gem_prime_mmap
#endif
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 13771a496e2b..06a11202a097 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -291,5 +291,12 @@ struct drm_connector;
bool drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
u8 *ksvs, u32 ksv_count);
int drm_connector_attach_content_protection_property(
- struct drm_connector *connector);
+ struct drm_connector *connector, bool hdcp_content_type);
+void drm_hdcp_update_content_protection(struct drm_connector *connector,
+ u64 val);
+
+/* Content Type classification for HDCP2.2 vs others */
+#define DRM_MODE_HDCP_CONTENT_TYPE0 0
+#define DRM_MODE_HDCP_CONTENT_TYPE1 1
+
#endif
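
A hedged sketch of how a driver that implements HDCP 2.2 would opt into the new content-type property and later report that the link is protected; the function names and call sites are assumptions:

#include <drm/drm_connector.h>
#include <drm/drm_hdcp.h>

static int example_hdcp_setup(struct drm_connector *connector)
{
	/* true: also create the "HDCP Content Type" property. */
	return drm_connector_attach_content_protection_property(connector,
								 true);
}

static void example_hdcp_enabled(struct drm_connector *connector)
{
	/* Assumed to run with the relevant connector locks held. */
	drm_hdcp_update_content_protection(connector,
					   DRM_MODE_CONTENT_PROTECTION_ENABLED);
}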
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index fafb6f592c4b..10100a4bbe2a 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -114,6 +114,9 @@ enum drm_ioctl_flags {
* Whether &drm_ioctl_desc.func should be called with the DRM BKL held
* or not. Enforced as the default for all modern drivers, hence there
* should never be a need to set this flag.
+ *
+ * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the
+ * only legacy IOCTL which needs this.
*/
DRM_UNLOCKED = BIT(4),
/**
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
new file mode 100644
index 000000000000..67c66f5ee591
--- /dev/null
+++ b/include/drm/drm_mipi_dbi.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * MIPI Display Bus Interface (DBI) LCD controller support
+ *
+ * Copyright 2016 Noralf Trønnes
+ */
+
+#ifndef __LINUX_MIPI_DBI_H
+#define __LINUX_MIPI_DBI_H
+
+#include <linux/mutex.h>
+#include <drm/drm_device.h>
+#include <drm/drm_simple_kms_helper.h>
+
+struct drm_rect;
+struct spi_device;
+struct gpio_desc;
+struct regulator;
+
+/**
+ * struct mipi_dbi - MIPI DBI interface
+ */
+struct mipi_dbi {
+ /**
+ * @cmdlock: Command lock
+ */
+ struct mutex cmdlock;
+
+ /**
+ * @command: Bus specific callback executing commands.
+ */
+ int (*command)(struct mipi_dbi *dbi, u8 *cmd, u8 *param, size_t num);
+
+ /**
+ * @read_commands: Array of read commands terminated by a zero entry.
+ * Reading is disabled if this is NULL.
+ */
+ const u8 *read_commands;
+
+ /**
+ * @swap_bytes: Swap bytes in buffer before transfer
+ */
+ bool swap_bytes;
+
+ /**
+ * @reset: Optional reset gpio
+ */
+ struct gpio_desc *reset;
+
+ /* Type C specific */
+
+ /**
+ * @spi: SPI device
+ */
+ struct spi_device *spi;
+
+ /**
+ * @dc: Optional D/C gpio.
+ */
+ struct gpio_desc *dc;
+
+ /**
+ * @tx_buf9: Buffer used for Option 1 9-bit conversion
+ */
+ void *tx_buf9;
+
+ /**
+ * @tx_buf9_len: Size of tx_buf9.
+ */
+ size_t tx_buf9_len;
+};
+
+/**
+ * struct mipi_dbi_dev - MIPI DBI device
+ */
+struct mipi_dbi_dev {
+ /**
+ * @drm: DRM device
+ */
+ struct drm_device drm;
+
+ /**
+ * @pipe: Display pipe structure
+ */
+ struct drm_simple_display_pipe pipe;
+
+ /**
+ * @connector: Connector
+ */
+ struct drm_connector connector;
+
+ /**
+ * @mode: Fixed display mode
+ */
+ struct drm_display_mode mode;
+
+ /**
+ * @enabled: Pipeline is enabled
+ */
+ bool enabled;
+
+ /**
+ * @tx_buf: Buffer used for transfer (copy clip rect area)
+ */
+ u16 *tx_buf;
+
+ /**
+ * @rotation: initial rotation in degrees Counter Clock Wise
+ */
+ unsigned int rotation;
+
+ /**
+ * @backlight: backlight device (optional)
+ */
+ struct backlight_device *backlight;
+
+ /**
+ * @regulator: power regulator (optional)
+ */
+ struct regulator *regulator;
+
+ /**
+ * @dbi: MIPI DBI interface
+ */
+ struct mipi_dbi dbi;
+};
+
+static inline struct mipi_dbi_dev *drm_to_mipi_dbi_dev(struct drm_device *drm)
+{
+ return container_of(drm, struct mipi_dbi_dev, drm);
+}
+
+int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi,
+ struct gpio_desc *dc);
+int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const struct drm_display_mode *mode,
+ unsigned int rotation, size_t tx_buf_size);
+int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ const struct drm_display_mode *mode, unsigned int rotation);
+void mipi_dbi_release(struct drm_device *drm);
+void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state);
+void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
+ struct drm_crtc_state *crtc_state,
+			  struct drm_plane_state *plane_state);
+void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
+void mipi_dbi_hw_reset(struct mipi_dbi *dbi);
+bool mipi_dbi_display_is_on(struct mipi_dbi *dbi);
+int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev);
+int mipi_dbi_poweron_conditional_reset(struct mipi_dbi_dev *dbidev);
+
+u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
+int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
+ u8 bpw, const void *buf, size_t len);
+
+int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val);
+int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
+int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swap);
+/**
+ * mipi_dbi_command - MIPI DCS command with optional parameter(s)
+ * @dbi: MIPI DBI structure
+ * @cmd: Command
+ * @seq...: Optional parameter(s)
+ *
+ * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for
+ * get/read.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+#define mipi_dbi_command(dbi, cmd, seq...) \
+({ \
+ u8 d[] = { seq }; \
+ mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
+})
+
+#ifdef CONFIG_DEBUG_FS
+int mipi_dbi_debugfs_init(struct drm_minor *minor);
+#else
+#define mipi_dbi_debugfs_init NULL
+#endif
+
+#endif /* __LINUX_MIPI_DBI_H */
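
A hedged sketch of a typical enable sequence built on the interface above, using standard DCS opcodes from <video/mipi_display.h>; the panel-specific delay and parameter value are assumptions:

#include <linux/delay.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dbi.h>

static void example_panel_enable(struct mipi_dbi *dbi)
{
	mipi_dbi_hw_reset(dbi);

	/* DCS commands without parameters... */
	mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
	msleep(120);

	/* ...and one with a parameter byte appended through the macro. */
	mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);

	mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
}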
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index f57eea0481e0..3bcbe30339f0 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -849,6 +849,12 @@ struct drm_mode_config {
*/
struct drm_property *content_protection_property;
+ /**
+ * @hdcp_content_type_property: DRM ENUM property for type of
+ * Protected Content.
+ */
+ struct drm_property *hdcp_content_type_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 8c738c0e6e9f..624bd15ecfab 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -24,6 +24,7 @@
#ifndef __DRM_PANEL_H__
#define __DRM_PANEL_H__
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -35,14 +36,6 @@ struct display_timing;
/**
* struct drm_panel_funcs - perform operations on a given panel
- * @disable: disable panel (turn off back light, etc.)
- * @unprepare: turn off panel
- * @prepare: turn on panel and perform set up
- * @enable: enable panel (turn on back light, etc.)
- * @get_modes: add modes to the connector that the panel is attached to and
- * return the number of modes added
- * @get_timings: copy display timings into the provided array and return
- * the number of display timings available
*
* The .prepare() function is typically called before the display controller
* starts to transmit video data. Panel drivers can use this to turn the panel
@@ -68,132 +61,107 @@ struct display_timing;
* the panel. This is the job of the .unprepare() function.
*/
struct drm_panel_funcs {
- int (*disable)(struct drm_panel *panel);
- int (*unprepare)(struct drm_panel *panel);
+ /**
+ * @prepare:
+ *
+ * Turn on panel and perform set up.
+ */
int (*prepare)(struct drm_panel *panel);
+
+ /**
+ * @enable:
+ *
+ * Enable panel (turn on back light, etc.).
+ */
int (*enable)(struct drm_panel *panel);
+
+ /**
+ * @disable:
+ *
+ * Disable panel (turn off back light, etc.).
+ */
+ int (*disable)(struct drm_panel *panel);
+
+ /**
+ * @unprepare:
+ *
+ * Turn off panel.
+ */
+ int (*unprepare)(struct drm_panel *panel);
+
+ /**
+ * @get_modes:
+ *
+ * Add modes to the connector that the panel is attached to and
+ * return the number of modes added.
+ */
int (*get_modes)(struct drm_panel *panel);
+
+ /**
+ * @get_timings:
+ *
+ * Copy display timings into the provided array and return
+ * the number of display timings available.
+ */
int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
struct display_timing *timings);
};
/**
* struct drm_panel - DRM panel object
- * @drm: DRM device owning the panel
- * @connector: DRM connector that the panel is attached to
- * @dev: parent device of the panel
- * @link: link from panel device (supplier) to DRM device (consumer)
- * @funcs: operations that can be performed on the panel
- * @list: panel entry in registry
*/
struct drm_panel {
+ /**
+ * @drm:
+ *
+ * DRM device owning the panel.
+ */
struct drm_device *drm;
+
+ /**
+ * @connector:
+ *
+ * DRM connector that the panel is attached to.
+ */
struct drm_connector *connector;
+
+ /**
+ * @dev:
+ *
+ * Parent device of the panel.
+ */
struct device *dev;
+ /**
+ * @funcs:
+ *
+ * Operations that can be performed on the panel.
+ */
const struct drm_panel_funcs *funcs;
+ /**
+ * @list:
+ *
+ * Panel entry in registry.
+ */
struct list_head list;
};
-/**
- * drm_disable_unprepare - power off a panel
- * @panel: DRM panel
- *
- * Calling this function will completely power off a panel (assert the panel's
- * reset, turn off power supplies, ...). After this function has completed, it
- * is usually no longer possible to communicate with the panel until another
- * call to drm_panel_prepare().
- *
- * Return: 0 on success or a negative error code on failure.
- */
-static inline int drm_panel_unprepare(struct drm_panel *panel)
-{
- if (panel && panel->funcs && panel->funcs->unprepare)
- return panel->funcs->unprepare(panel);
-
- return panel ? -ENOSYS : -EINVAL;
-}
-
-/**
- * drm_panel_disable - disable a panel
- * @panel: DRM panel
- *
- * This will typically turn off the panel's backlight or disable the display
- * drivers. For smart panels it should still be possible to communicate with
- * the integrated circuitry via any command bus after this call.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-static inline int drm_panel_disable(struct drm_panel *panel)
-{
- if (panel && panel->funcs && panel->funcs->disable)
- return panel->funcs->disable(panel);
-
- return panel ? -ENOSYS : -EINVAL;
-}
-
-/**
- * drm_panel_prepare - power on a panel
- * @panel: DRM panel
- *
- * Calling this function will enable power and deassert any reset signals to
- * the panel. After this has completed it is possible to communicate with any
- * integrated circuitry via a command bus.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-static inline int drm_panel_prepare(struct drm_panel *panel)
-{
- if (panel && panel->funcs && panel->funcs->prepare)
- return panel->funcs->prepare(panel);
-
- return panel ? -ENOSYS : -EINVAL;
-}
-
-/**
- * drm_panel_enable - enable a panel
- * @panel: DRM panel
- *
- * Calling this function will cause the panel display drivers to be turned on
- * and the backlight to be enabled. Content will be visible on screen after
- * this call completes.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-static inline int drm_panel_enable(struct drm_panel *panel)
-{
- if (panel && panel->funcs && panel->funcs->enable)
- return panel->funcs->enable(panel);
-
- return panel ? -ENOSYS : -EINVAL;
-}
-
-/**
- * drm_panel_get_modes - probe the available display modes of a panel
- * @panel: DRM panel
- *
- * The modes probed from the panel are automatically added to the connector
- * that the panel is attached to.
- *
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
- */
-static inline int drm_panel_get_modes(struct drm_panel *panel)
-{
- if (panel && panel->funcs && panel->funcs->get_modes)
- return panel->funcs->get_modes(panel);
-
- return panel ? -ENOSYS : -EINVAL;
-}
-
void drm_panel_init(struct drm_panel *panel);
int drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
-int drm_panel_detach(struct drm_panel *panel);
+void drm_panel_detach(struct drm_panel *panel);
+
+int drm_panel_prepare(struct drm_panel *panel);
+int drm_panel_unprepare(struct drm_panel *panel);
+
+int drm_panel_enable(struct drm_panel *panel);
+int drm_panel_disable(struct drm_panel *panel);
+
+int drm_panel_get_modes(struct drm_panel *panel);
#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
struct drm_panel *of_drm_find_panel(const struct device_node *np);
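
With the inline wrappers turned into exported functions, the usual call sequence from a display driver looks like the hedged sketch below; error handling is abbreviated and the function names are hypothetical:

#include <drm/drm_panel.h>

static int example_display_on(struct drm_panel *panel)
{
	int ret;

	ret = drm_panel_prepare(panel);	/* power rails on, deassert reset */
	if (ret)
		return ret;

	return drm_panel_enable(panel);	/* backlight on, start scanout */
}

static void example_display_off(struct drm_panel *panel)
{
	drm_panel_disable(panel);	/* backlight off */
	drm_panel_unprepare(panel);	/* power rails off */
}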
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index b03731a3f079..d89311b822d5 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -42,7 +42,6 @@
* This just contains the internal &struct dma_buf and handle caches for each
* &struct drm_file used by the PRIME core code.
*/
-
struct drm_prime_file_private {
/* private: */
struct mutex lock;
@@ -64,25 +63,18 @@ struct drm_file;
struct device;
-struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags);
+/* core prime functions */
+struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
+ struct dma_buf_export_info *exp_info);
+void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd, uint32_t *handle);
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
-int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
-struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
- struct dma_buf *dma_buf,
- struct device *attach_dev);
-
-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd, uint32_t *handle);
-struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
- struct dma_buf_export_info *exp_info);
-void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+/* helper functions for exporting */
int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
void drm_gem_map_detach(struct dma_buf *dma_buf,
@@ -94,12 +86,25 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
+
+int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
- dma_addr_t *addrs, int max_pages);
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
+ int flags);
+
+/* helper functions for importing */
+struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
+ struct dma_buf *dma_buf,
+ struct device *attach_dev);
+struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
+int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages);
+
#endif /* __DRM_PRIME_H__ */
diff --git a/include/drm/drm_self_refresh_helper.h b/include/drm/drm_self_refresh_helper.h
index 397a583ccca7..5b79d253fb46 100644
--- a/include/drm/drm_self_refresh_helper.h
+++ b/include/drm/drm_self_refresh_helper.h
@@ -12,9 +12,9 @@ struct drm_atomic_state;
struct drm_crtc;
void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state);
+void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
+ unsigned int commit_time_ms);
-int drm_self_refresh_helper_init(struct drm_crtc *crtc,
- unsigned int entry_delay_ms);
-
+int drm_self_refresh_helper_init(struct drm_crtc *crtc);
void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc);
#endif
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index 4f311e836cdc..d454ef617b2c 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -4,10 +4,13 @@
struct drm_device;
struct device;
+struct drm_connector;
+struct drm_property;
int drm_class_device_register(struct device *dev);
void drm_class_device_unregister(struct device *dev);
void drm_sysfs_hotplug_event(struct drm_device *dev);
-
+void drm_sysfs_connector_status_event(struct drm_connector *connector,
+ struct drm_property *property);
#endif
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index e528bb2f659d..9fe4ba8bc622 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -30,7 +30,6 @@
#include <drm/drm_file.h>
#include <drm/drm_modes.h>
-#include <uapi/drm/drm.h>
struct drm_device;
struct drm_crtc;
diff --git a/include/drm/drm_vram_mm_helper.h b/include/drm/drm_vram_mm_helper.h
index a8ffd8599b08..2aacfb1ccfae 100644
--- a/include/drm/drm_vram_mm_helper.h
+++ b/include/drm/drm_vram_mm_helper.h
@@ -3,6 +3,8 @@
#ifndef DRM_VRAM_MM_HELPER_H
#define DRM_VRAM_MM_HELPER_H
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
#include <drm/ttm/ttm_bo_driver.h>
struct drm_device;
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index dcb95bd9dee6..55c3b123581b 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -34,7 +34,7 @@ enum i915_component_type {
/* MAX_PORTS is the number of ports
 * It must be kept in sync with I915_MAX_PORTS defined in i915_drv.h
*/
-#define MAX_PORTS 6
+#define MAX_PORTS 9
/**
* struct i915_audio_component - Used for direct communication between i915 and hda drivers
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 7523e9a7b6e2..23274cf92712 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -30,11 +30,11 @@
#include <uapi/drm/i915_drm.h>
/* For use by IPS driver */
-extern unsigned long i915_read_mch_val(void);
-extern bool i915_gpu_raise(void);
-extern bool i915_gpu_lower(void);
-extern bool i915_gpu_busy(void);
-extern bool i915_gpu_turbo_disable(void);
+unsigned long i915_read_mch_val(void);
+bool i915_gpu_raise(void);
+bool i915_gpu_lower(void);
+bool i915_gpu_busy(void);
+bool i915_gpu_turbo_disable(void);
/* Exported from arch/x86/kernel/early-quirks.c */
extern struct resource intel_graphics_stolen_res;
@@ -109,6 +109,9 @@ enum port {
PORT_D,
PORT_E,
PORT_F,
+ PORT_G,
+ PORT_H,
+ PORT_I,
I915_MAX_PORTS
};
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 6d60ea68c171..b1f66b117c74 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -466,7 +466,10 @@
INTEL_VGA_DEVICE(0x9BC5, info), \
INTEL_VGA_DEVICE(0x9BC8, info), \
INTEL_VGA_DEVICE(0x9BC4, info), \
- INTEL_VGA_DEVICE(0x9BC2, info)
+ INTEL_VGA_DEVICE(0x9BC2, info), \
+ INTEL_VGA_DEVICE(0x9BC6, info), \
+ INTEL_VGA_DEVICE(0x9BE6, info), \
+ INTEL_VGA_DEVICE(0x9BF6, info)
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
@@ -568,7 +571,8 @@
INTEL_VGA_DEVICE(0x8A56, info), \
INTEL_VGA_DEVICE(0x8A71, info), \
INTEL_VGA_DEVICE(0x8A70, info), \
- INTEL_VGA_DEVICE(0x8A53, info)
+ INTEL_VGA_DEVICE(0x8A53, info), \
+ INTEL_VGA_DEVICE(0x8A54, info)
#define INTEL_ICL_11_IDS(info) \
INTEL_ICL_PORT_F_IDS(info), \
@@ -582,4 +586,14 @@
INTEL_VGA_DEVICE(0x4551, info), \
INTEL_VGA_DEVICE(0x4541, info)
+/* TGL */
+#define INTEL_TGL_12_IDS(info) \
+ INTEL_VGA_DEVICE(0x9A49, info), \
+ INTEL_VGA_DEVICE(0x9A40, info), \
+ INTEL_VGA_DEVICE(0x9A59, info), \
+ INTEL_VGA_DEVICE(0x9A60, info), \
+ INTEL_VGA_DEVICE(0x9A68, info), \
+ INTEL_VGA_DEVICE(0x9A70, info), \
+ INTEL_VGA_DEVICE(0x9A78, info)
+
#endif /* _I915_PCIIDS_H */
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
deleted file mode 100644
index 51fc667beef7..000000000000
--- a/include/drm/tinydrm/mipi-dbi.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * MIPI Display Bus Interface (DBI) LCD controller support
- *
- * Copyright 2016 Noralf Trønnes
- */
-
-#ifndef __LINUX_MIPI_DBI_H
-#define __LINUX_MIPI_DBI_H
-
-#include <linux/mutex.h>
-#include <drm/drm_device.h>
-#include <drm/drm_simple_kms_helper.h>
-
-struct drm_rect;
-struct spi_device;
-struct gpio_desc;
-struct regulator;
-
-/**
- * struct mipi_dbi - MIPI DBI controller
- * @spi: SPI device
- * @enabled: Pipeline is enabled
- * @cmdlock: Command lock
- * @command: Bus specific callback executing commands.
- * @read_commands: Array of read commands terminated by a zero entry.
- * Reading is disabled if this is NULL.
- * @dc: Optional D/C gpio.
- * @tx_buf: Buffer used for transfer (copy clip rect area)
- * @tx_buf9: Buffer used for Option 1 9-bit conversion
- * @tx_buf9_len: Size of tx_buf9.
- * @swap_bytes: Swap bytes in buffer before transfer
- * @reset: Optional reset gpio
- * @rotation: initial rotation in degrees Counter Clock Wise
- * @backlight: backlight device (optional)
- * @regulator: power regulator (optional)
- */
-struct mipi_dbi {
- /**
- * @drm: DRM device
- */
- struct drm_device drm;
-
- /**
- * @pipe: Display pipe structure
- */
- struct drm_simple_display_pipe pipe;
-
- struct spi_device *spi;
- bool enabled;
- struct mutex cmdlock;
- int (*command)(struct mipi_dbi *mipi, u8 *cmd, u8 *param, size_t num);
- const u8 *read_commands;
- struct gpio_desc *dc;
- u16 *tx_buf;
- void *tx_buf9;
- size_t tx_buf9_len;
- bool swap_bytes;
- struct gpio_desc *reset;
- unsigned int rotation;
- struct backlight_device *backlight;
- struct regulator *regulator;
-};
-
-static inline struct mipi_dbi *drm_to_mipi_dbi(struct drm_device *drm)
-{
- return container_of(drm, struct mipi_dbi, drm);
-}
-
-int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
- struct gpio_desc *dc);
-int mipi_dbi_init(struct mipi_dbi *mipi,
- const struct drm_simple_display_pipe_funcs *funcs,
- const struct drm_display_mode *mode, unsigned int rotation);
-void mipi_dbi_release(struct drm_device *drm);
-void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state);
-void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plan_state);
-void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
-void mipi_dbi_hw_reset(struct mipi_dbi *mipi);
-bool mipi_dbi_display_is_on(struct mipi_dbi *mipi);
-int mipi_dbi_poweron_reset(struct mipi_dbi *mipi);
-int mipi_dbi_poweron_conditional_reset(struct mipi_dbi *mipi);
-u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
-
-int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
-int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
-int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
-int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap);
-/**
- * mipi_dbi_command - MIPI DCS command with optional parameter(s)
- * @mipi: MIPI structure
- * @cmd: Command
- * @seq...: Optional parameter(s)
- *
- * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for
- * get/read.
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-#define mipi_dbi_command(mipi, cmd, seq...) \
-({ \
- u8 d[] = { seq }; \
- mipi_dbi_command_stackbuf(mipi, cmd, d, ARRAY_SIZE(d)); \
-})
-
-#ifdef CONFIG_DEBUG_FS
-int mipi_dbi_debugfs_init(struct drm_minor *minor);
-#else
-#define mipi_dbi_debugfs_init NULL
-#endif
-
-#endif /* __LINUX_MIPI_DBI_H */
diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h
deleted file mode 100644
index f8bcadf48cb1..000000000000
--- a/include/drm/tinydrm/tinydrm-helpers.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2016 Noralf Trønnes
- */
-
-#ifndef __LINUX_TINYDRM_HELPERS_H
-#define __LINUX_TINYDRM_HELPERS_H
-
-struct backlight_device;
-struct drm_device;
-struct drm_display_mode;
-struct drm_framebuffer;
-struct drm_rect;
-struct drm_simple_display_pipe;
-struct drm_simple_display_pipe_funcs;
-struct spi_transfer;
-struct spi_message;
-struct spi_device;
-struct device;
-
-/**
- * tinydrm_machine_little_endian - Machine is little endian
- *
- * Returns:
- * true if *defined(__LITTLE_ENDIAN)*, false otherwise
- */
-static inline bool tinydrm_machine_little_endian(void)
-{
-#if defined(__LITTLE_ENDIAN)
- return true;
-#else
- return false;
-#endif
-}
-
-int tinydrm_display_pipe_init(struct drm_device *drm,
- struct drm_simple_display_pipe *pipe,
- const struct drm_simple_display_pipe_funcs *funcs,
- int connector_type,
- const uint32_t *formats,
- unsigned int format_count,
- const struct drm_display_mode *mode,
- unsigned int rotation);
-
-size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len);
-bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw);
-int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
- struct spi_transfer *header, u8 bpw, const void *buf,
- size_t len);
-void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m);
-
-#ifdef DEBUG
-/**
- * tinydrm_dbg_spi_message - Dump SPI message
- * @spi: SPI device
- * @m: SPI message
- *
- * Dumps info about the transfers in a SPI message including buffer content.
- * DEBUG has to be defined for this function to be enabled alongside setting
- * the DRM_UT_DRIVER bit of &drm_debug.
- */
-static inline void tinydrm_dbg_spi_message(struct spi_device *spi,
- struct spi_message *m)
-{
- if (drm_debug & DRM_UT_DRIVER)
- _tinydrm_dbg_spi_message(spi, m);
-}
-#else
-static inline void tinydrm_dbg_spi_message(struct spi_device *spi,
- struct spi_message *m)
-{
-}
-#endif /* DEBUG */
-
-#endif /* __LINUX_TINYDRM_HELPERS_H */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 49d9cdfc58f2..43c4929a2171 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -31,6 +31,7 @@
#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_
+#include <drm/drm_gem.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_vma_manager.h>
#include <linux/kref.h>
@@ -39,7 +40,7 @@
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
struct ttm_bo_global;
@@ -127,6 +128,7 @@ struct ttm_tt;
/**
* struct ttm_buffer_object
*
+ * @base: drm_gem_object superclass data.
* @bdev: Pointer to the buffer object device structure.
* @type: The bo type.
* @destroy: Destruction function. If NULL, kfree is used.
@@ -150,7 +152,6 @@ struct ttm_tt;
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
* @moving: Fence set when BO is moving
- * @vma_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
@@ -169,6 +170,8 @@ struct ttm_tt;
*/
struct ttm_buffer_object {
+ struct drm_gem_object base;
+
/**
* Members constant at init.
*/
@@ -215,9 +218,6 @@ struct ttm_buffer_object {
*/
struct dma_fence *moving;
-
- struct drm_vma_offset_node vma_node;
-
unsigned priority;
/**
@@ -230,8 +230,6 @@ struct ttm_buffer_object {
struct sg_table *sg;
- struct reservation_object *resv;
- struct reservation_object ttm_resv;
struct mutex wu_mutex;
};
@@ -275,7 +273,7 @@ struct ttm_bo_kmap_obj {
struct ttm_operation_ctx {
bool interruptible;
bool no_wait_gpu;
- struct reservation_object *resv;
+ struct dma_resv *resv;
uint64_t bytes_moved;
uint32_t flags;
};
@@ -495,7 +493,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* @page_alignment: Data alignment in pages.
* @ctx: TTM operation context for memory allocation.
* @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
* This function initializes a pre-allocated struct ttm_buffer_object.
@@ -528,7 +526,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_operation_ctx *ctx,
size_t acc_size,
struct sg_table *sg,
- struct reservation_object *resv,
+ struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
/**
@@ -547,7 +545,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
* @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
* This function initializes a pre-allocated struct ttm_buffer_object.
@@ -572,7 +570,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
unsigned long size, enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment, bool interrubtible, size_t acc_size,
- struct sg_table *sg, struct reservation_object *resv,
+ struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
/**
@@ -768,4 +766,23 @@ int ttm_bo_swapout(struct ttm_bo_global *glob,
struct ttm_operation_ctx *ctx);
void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_uses_embedded_gem_object - check if the given bo uses the
+ * embedded drm_gem_object.
+ *
+ * Most ttm drivers also use gem, so the embedded
+ * ttm_buffer_object.base will be initialized by the driver (before
+ * calling ttm_bo_init). It is also possible to use ttm without gem,
+ * though (vmwgfx does that).
+ *
+ * This helper figures out whether a given ttm bo is also a gem
+ * object.
+ *
+ * @bo: The bo to check.
+ */
+static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
+{
+ return bo->base.dev != NULL;
+}
#endif
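
The ttm_bo_uses_embedded_gem_object() helper added above only tests whether bo->base.dev was set before ttm_bo_init(). A minimal sketch of how a driver's destroy callback might use it follows; the foo_* names are illustrative only and this is an assumed usage pattern, not code from the patch.

#include <linux/slab.h>
#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>

struct foo_bo {
        struct ttm_buffer_object bo;    /* bo.base is the embedded GEM object */
};

/* Destroy callback a driver could pass to ttm_bo_init() in this sketch. */
static void foo_bo_destroy(struct ttm_buffer_object *bo)
{
        struct foo_bo *fbo = container_of(bo, struct foo_bo, bo);

        /*
         * bo->base.dev is only set when the driver initialized the
         * embedded GEM object before ttm_bo_init(); pure-TTM users such
         * as vmwgfx leave it NULL, so the GEM release is skipped for them.
         */
        if (ttm_bo_uses_embedded_gem_object(bo))
                drm_gem_object_release(&bo->base);

        kfree(fbo);
}
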
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index c9b8ba492f24..6f536caea368 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -35,7 +35,7 @@
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "ttm_bo_api.h"
#include "ttm_memory.h"
@@ -390,6 +390,16 @@ struct ttm_bo_driver {
* notify driver that a BO was deleted from LRU.
*/
void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Notify the driver that we're about to release a BO
+ *
+ * @bo: BO that is about to be released
+ *
+ * Gives the driver a chance to do any cleanup, including
+ * adding fences that may force a delayed delete
+ */
+ void (*release_notify)(struct ttm_buffer_object *bo);
};
/**
@@ -654,14 +664,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
if (WARN_ON(ticket))
return -EBUSY;
- success = reservation_object_trylock(bo->resv);
+ success = dma_resv_trylock(bo->base.resv);
return success ? 0 : -EBUSY;
}
if (interruptible)
- ret = reservation_object_lock_interruptible(bo->resv, ticket);
+ ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
else
- ret = reservation_object_lock(bo->resv, ticket);
+ ret = dma_resv_lock(bo->base.resv, ticket);
if (ret == -EINTR)
return -ERESTARTSYS;
return ret;
@@ -745,10 +755,10 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
WARN_ON(!kref_read(&bo->kref));
if (interruptible)
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- ticket);
+ ret = dma_resv_lock_slow_interruptible(bo->base.resv,
+ ticket);
else
- ww_mutex_lock_slow(&bo->resv->lock, ticket);
+ dma_resv_lock_slow(bo->base.resv, ticket);
if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo);
@@ -773,7 +783,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
else
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bo->bdev->glob->lru_lock);
- reservation_object_unlock(bo->resv);
+ dma_resv_unlock(bo->base.resv);
}
/*
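
The new release_notify() hook above gives drivers one last callback before a BO is released. A minimal sketch of wiring it up, assuming a hypothetical driver (the foo_* names are not real kernel symbols):

#include <linux/printk.h>
#include <drm/ttm/ttm_bo_driver.h>

static void foo_release_notify(struct ttm_buffer_object *bo)
{
        /*
         * Last chance to tear down per-BO driver state, or to add
         * fences to bo->base.resv that push TTM onto its delayed
         * delete path instead of freeing the BO immediately.
         */
        pr_debug("releasing BO with %lu pages\n", bo->num_pages);
}

static struct ttm_bo_driver foo_bo_driver = {
        /* ...the other mandatory callbacks are elided in this sketch... */
        .release_notify = foo_release_notify,
};
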
diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h
index 7138384e2ef9..babd08a1d226 100644
--- a/include/dt-bindings/bus/ti-sysc.h
+++ b/include/dt-bindings/bus/ti-sysc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* TI sysc interconnect target module defines */
/* Generic sysc found on omap2 and later, also known as type1 */
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
new file mode 100644
index 000000000000..38074a5f7296
--- /dev/null
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later OR MIT */
+#ifndef DT_BINDINGS_AST2600_CLOCK_H
+#define DT_BINDINGS_AST2600_CLOCK_H
+
+#define ASPEED_CLK_GATE_ECLK 0
+#define ASPEED_CLK_GATE_GCLK 1
+
+#define ASPEED_CLK_GATE_MCLK 2
+
+#define ASPEED_CLK_GATE_VCLK 3
+#define ASPEED_CLK_GATE_BCLK 4
+#define ASPEED_CLK_GATE_DCLK 5
+
+#define ASPEED_CLK_GATE_LCLK 6
+#define ASPEED_CLK_GATE_LHCCLK 7
+
+#define ASPEED_CLK_GATE_D1CLK 8
+#define ASPEED_CLK_GATE_YCLK 9
+
+#define ASPEED_CLK_GATE_REF0CLK 10
+#define ASPEED_CLK_GATE_REF1CLK 11
+
+#define ASPEED_CLK_GATE_ESPICLK 12
+
+#define ASPEED_CLK_GATE_USBUHCICLK 13
+#define ASPEED_CLK_GATE_USBPORT1CLK 14
+#define ASPEED_CLK_GATE_USBPORT2CLK 15
+
+#define ASPEED_CLK_GATE_RSACLK 16
+#define ASPEED_CLK_GATE_RVASCLK 17
+
+#define ASPEED_CLK_GATE_MAC1CLK 18
+#define ASPEED_CLK_GATE_MAC2CLK 19
+#define ASPEED_CLK_GATE_MAC3CLK 20
+#define ASPEED_CLK_GATE_MAC4CLK 21
+
+#define ASPEED_CLK_GATE_UART1CLK 22
+#define ASPEED_CLK_GATE_UART2CLK 23
+#define ASPEED_CLK_GATE_UART3CLK 24
+#define ASPEED_CLK_GATE_UART4CLK 25
+#define ASPEED_CLK_GATE_UART5CLK 26
+#define ASPEED_CLK_GATE_UART6CLK 27
+#define ASPEED_CLK_GATE_UART7CLK 28
+#define ASPEED_CLK_GATE_UART8CLK 29
+#define ASPEED_CLK_GATE_UART9CLK 30
+#define ASPEED_CLK_GATE_UART10CLK 31
+#define ASPEED_CLK_GATE_UART11CLK 32
+#define ASPEED_CLK_GATE_UART12CLK 33
+#define ASPEED_CLK_GATE_UART13CLK 34
+
+#define ASPEED_CLK_GATE_SDCLK 35
+#define ASPEED_CLK_GATE_EMMCCLK 36
+
+#define ASPEED_CLK_GATE_I3C0CLK 37
+#define ASPEED_CLK_GATE_I3C1CLK 38
+#define ASPEED_CLK_GATE_I3C2CLK 39
+#define ASPEED_CLK_GATE_I3C3CLK 40
+#define ASPEED_CLK_GATE_I3C4CLK 41
+#define ASPEED_CLK_GATE_I3C5CLK 42
+#define ASPEED_CLK_GATE_I3C6CLK 43
+#define ASPEED_CLK_GATE_I3C7CLK 44
+
+#define ASPEED_CLK_GATE_FSICLK 45
+
+#define ASPEED_CLK_HPLL 46
+#define ASPEED_CLK_MPLL 47
+#define ASPEED_CLK_DPLL 48
+#define ASPEED_CLK_EPLL 49
+#define ASPEED_CLK_APLL 50
+#define ASPEED_CLK_AHB 51
+#define ASPEED_CLK_APB1 52
+#define ASPEED_CLK_APB2 53
+#define ASPEED_CLK_BCLK 54
+#define ASPEED_CLK_D1CLK 55
+#define ASPEED_CLK_VCLK 56
+#define ASPEED_CLK_LHCLK 57
+#define ASPEED_CLK_UART 58
+#define ASPEED_CLK_UARTX 59
+#define ASPEED_CLK_SDIO 60
+#define ASPEED_CLK_EMMC 61
+#define ASPEED_CLK_ECLK 62
+#define ASPEED_CLK_ECLK_MUX 63
+#define ASPEED_CLK_MAC12 64
+#define ASPEED_CLK_MAC34 65
+#define ASPEED_CLK_USBPHY_40M 66
+
+/* Only list resets here that are not part of a gate */
+#define ASPEED_RESET_ADC 55
+#define ASPEED_RESET_JTAG_MASTER2 54
+#define ASPEED_RESET_I3C_DMA 39
+#define ASPEED_RESET_PWM 37
+#define ASPEED_RESET_PECI 36
+#define ASPEED_RESET_MII 35
+#define ASPEED_RESET_I2C 34
+#define ASPEED_RESET_H2X 31
+#define ASPEED_RESET_GP_MCU 30
+#define ASPEED_RESET_DP_MCU 29
+#define ASPEED_RESET_DP 28
+#define ASPEED_RESET_RC_XDMA 27
+#define ASPEED_RESET_GRAPHICS 26
+#define ASPEED_RESET_DEV_XDMA 25
+#define ASPEED_RESET_DEV_MCTP 24
+#define ASPEED_RESET_RC_MCTP 23
+#define ASPEED_RESET_JTAG_MASTER 22
+#define ASPEED_RESET_PCIE_DEV_O 21
+#define ASPEED_RESET_PCIE_DEV_OEN 20
+#define ASPEED_RESET_PCIE_RC_O 19
+#define ASPEED_RESET_PCIE_RC_OEN 18
+#define ASPEED_RESET_PCI_DP 5
+#define ASPEED_RESET_AHB 1
+#define ASPEED_RESET_SDRAM 0
+
+#endif
diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h
index 2cec01f96897..b60c03430cf1 100644
--- a/include/dt-bindings/clock/bcm2835.h
+++ b/include/dt-bindings/clock/bcm2835.h
@@ -58,3 +58,5 @@
#define BCM2835_CLOCK_DSI1E 48
#define BCM2835_CLOCK_DSI0P 49
#define BCM2835_CLOCK_DSI1P 50
+
+#define BCM2711_CLOCK_EMMC2 51
diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
index 5255b1c2420e..d7b201652f4c 100644
--- a/include/dt-bindings/clock/imx8mn-clock.h
+++ b/include/dt-bindings/clock/imx8mn-clock.h
@@ -209,7 +209,8 @@
#define IMX8MN_CLK_ARM 191
#define IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK 192
#define IMX8MN_CLK_GPU_CORE_ROOT 193
+#define IMX8MN_CLK_GIC 194
-#define IMX8MN_CLK_END 194
+#define IMX8MN_CLK_END 195
#endif
diff --git a/include/dt-bindings/clock/ingenic,tcu.h b/include/dt-bindings/clock/ingenic,tcu.h
new file mode 100644
index 000000000000..d569650a7945
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,tcu.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,tcu DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_INGENIC_TCU_H__
+#define __DT_BINDINGS_CLOCK_INGENIC_TCU_H__
+
+#define TCU_CLK_TIMER0 0
+#define TCU_CLK_TIMER1 1
+#define TCU_CLK_TIMER2 2
+#define TCU_CLK_TIMER3 3
+#define TCU_CLK_TIMER4 4
+#define TCU_CLK_TIMER5 5
+#define TCU_CLK_TIMER6 6
+#define TCU_CLK_TIMER7 7
+#define TCU_CLK_WDT 8
+#define TCU_CLK_OST 9
+
+#endif /* __DT_BINDINGS_CLOCK_INGENIC_TCU_H__ */
diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/jz4740-cgu.h
index 6ed83f926ae7..e82d77028581 100644
--- a/include/dt-bindings/clock/jz4740-cgu.h
+++ b/include/dt-bindings/clock/jz4740-cgu.h
@@ -34,5 +34,6 @@
#define JZ4740_CLK_ADC 19
#define JZ4740_CLK_I2C 20
#define JZ4740_CLK_AIC 21
+#define JZ4740_CLK_TCU 22
#endif /* __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ */
diff --git a/include/dt-bindings/clock/mt6779-clk.h b/include/dt-bindings/clock/mt6779-clk.h
new file mode 100644
index 000000000000..b083139afbd2
--- /dev/null
+++ b/include/dt-bindings/clock/mt6779-clk.h
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Wendell Lin <wendell.lin@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT6779_H
+#define _DT_BINDINGS_CLK_MT6779_H
+
+/* TOPCKGEN */
+#define CLK_TOP_AXI 1
+#define CLK_TOP_MM 2
+#define CLK_TOP_CAM 3
+#define CLK_TOP_MFG 4
+#define CLK_TOP_CAMTG 5
+#define CLK_TOP_UART 6
+#define CLK_TOP_SPI 7
+#define CLK_TOP_MSDC50_0_HCLK 8
+#define CLK_TOP_MSDC50_0 9
+#define CLK_TOP_MSDC30_1 10
+#define CLK_TOP_MSDC30_2 11
+#define CLK_TOP_AUD 12
+#define CLK_TOP_AUD_INTBUS 13
+#define CLK_TOP_FPWRAP_ULPOSC 14
+#define CLK_TOP_SCP 15
+#define CLK_TOP_ATB 16
+#define CLK_TOP_SSPM 17
+#define CLK_TOP_DPI0 18
+#define CLK_TOP_SCAM 19
+#define CLK_TOP_AUD_1 20
+#define CLK_TOP_AUD_2 21
+#define CLK_TOP_DISP_PWM 22
+#define CLK_TOP_SSUSB_TOP_XHCI 23
+#define CLK_TOP_USB_TOP 24
+#define CLK_TOP_SPM 25
+#define CLK_TOP_I2C 26
+#define CLK_TOP_F52M_MFG 27
+#define CLK_TOP_SENINF 28
+#define CLK_TOP_DXCC 29
+#define CLK_TOP_CAMTG2 30
+#define CLK_TOP_AUD_ENG1 31
+#define CLK_TOP_AUD_ENG2 32
+#define CLK_TOP_FAES_UFSFDE 33
+#define CLK_TOP_FUFS 34
+#define CLK_TOP_IMG 35
+#define CLK_TOP_DSP 36
+#define CLK_TOP_DSP1 37
+#define CLK_TOP_DSP2 38
+#define CLK_TOP_IPU_IF 39
+#define CLK_TOP_CAMTG3 40
+#define CLK_TOP_CAMTG4 41
+#define CLK_TOP_PMICSPI 42
+#define CLK_TOP_MAINPLL_CK 43
+#define CLK_TOP_MAINPLL_D2 44
+#define CLK_TOP_MAINPLL_D3 45
+#define CLK_TOP_MAINPLL_D5 46
+#define CLK_TOP_MAINPLL_D7 47
+#define CLK_TOP_MAINPLL_D2_D2 48
+#define CLK_TOP_MAINPLL_D2_D4 49
+#define CLK_TOP_MAINPLL_D2_D8 50
+#define CLK_TOP_MAINPLL_D2_D16 51
+#define CLK_TOP_MAINPLL_D3_D2 52
+#define CLK_TOP_MAINPLL_D3_D4 53
+#define CLK_TOP_MAINPLL_D3_D8 54
+#define CLK_TOP_MAINPLL_D5_D2 55
+#define CLK_TOP_MAINPLL_D5_D4 56
+#define CLK_TOP_MAINPLL_D7_D2 57
+#define CLK_TOP_MAINPLL_D7_D4 58
+#define CLK_TOP_UNIVPLL_CK 59
+#define CLK_TOP_UNIVPLL_D2 60
+#define CLK_TOP_UNIVPLL_D3 61
+#define CLK_TOP_UNIVPLL_D5 62
+#define CLK_TOP_UNIVPLL_D7 63
+#define CLK_TOP_UNIVPLL_D2_D2 64
+#define CLK_TOP_UNIVPLL_D2_D4 65
+#define CLK_TOP_UNIVPLL_D2_D8 66
+#define CLK_TOP_UNIVPLL_D3_D2 67
+#define CLK_TOP_UNIVPLL_D3_D4 68
+#define CLK_TOP_UNIVPLL_D3_D8 69
+#define CLK_TOP_UNIVPLL_D5_D2 70
+#define CLK_TOP_UNIVPLL_D5_D4 71
+#define CLK_TOP_UNIVPLL_D5_D8 72
+#define CLK_TOP_APLL1_CK 73
+#define CLK_TOP_APLL1_D2 74
+#define CLK_TOP_APLL1_D4 75
+#define CLK_TOP_APLL1_D8 76
+#define CLK_TOP_APLL2_CK 77
+#define CLK_TOP_APLL2_D2 78
+#define CLK_TOP_APLL2_D4 79
+#define CLK_TOP_APLL2_D8 80
+#define CLK_TOP_TVDPLL_CK 81
+#define CLK_TOP_TVDPLL_D2 82
+#define CLK_TOP_TVDPLL_D4 83
+#define CLK_TOP_TVDPLL_D8 84
+#define CLK_TOP_TVDPLL_D16 85
+#define CLK_TOP_MSDCPLL_CK 86
+#define CLK_TOP_MSDCPLL_D2 87
+#define CLK_TOP_MSDCPLL_D4 88
+#define CLK_TOP_MSDCPLL_D8 89
+#define CLK_TOP_MSDCPLL_D16 90
+#define CLK_TOP_AD_OSC_CK 91
+#define CLK_TOP_OSC_D2 92
+#define CLK_TOP_OSC_D4 93
+#define CLK_TOP_OSC_D8 94
+#define CLK_TOP_OSC_D16 95
+#define CLK_TOP_F26M_CK_D2 96
+#define CLK_TOP_MFGPLL_CK 97
+#define CLK_TOP_UNIVP_192M_CK 98
+#define CLK_TOP_UNIVP_192M_D2 99
+#define CLK_TOP_UNIVP_192M_D4 100
+#define CLK_TOP_UNIVP_192M_D8 101
+#define CLK_TOP_UNIVP_192M_D16 102
+#define CLK_TOP_UNIVP_192M_D32 103
+#define CLK_TOP_MMPLL_CK 104
+#define CLK_TOP_MMPLL_D4 105
+#define CLK_TOP_MMPLL_D4_D2 106
+#define CLK_TOP_MMPLL_D4_D4 107
+#define CLK_TOP_MMPLL_D5 108
+#define CLK_TOP_MMPLL_D5_D2 109
+#define CLK_TOP_MMPLL_D5_D4 110
+#define CLK_TOP_MMPLL_D6 111
+#define CLK_TOP_MMPLL_D7 112
+#define CLK_TOP_CLK26M 113
+#define CLK_TOP_CLK13M 114
+#define CLK_TOP_ADSP 115
+#define CLK_TOP_DPMAIF 116
+#define CLK_TOP_VENC 117
+#define CLK_TOP_VDEC 118
+#define CLK_TOP_CAMTM 119
+#define CLK_TOP_PWM 120
+#define CLK_TOP_ADSPPLL_CK 121
+#define CLK_TOP_I2S0_M_SEL 122
+#define CLK_TOP_I2S1_M_SEL 123
+#define CLK_TOP_I2S2_M_SEL 124
+#define CLK_TOP_I2S3_M_SEL 125
+#define CLK_TOP_I2S4_M_SEL 126
+#define CLK_TOP_I2S5_M_SEL 127
+#define CLK_TOP_APLL12_DIV0 128
+#define CLK_TOP_APLL12_DIV1 129
+#define CLK_TOP_APLL12_DIV2 130
+#define CLK_TOP_APLL12_DIV3 131
+#define CLK_TOP_APLL12_DIV4 132
+#define CLK_TOP_APLL12_DIVB 133
+#define CLK_TOP_APLL12_DIV5 134
+#define CLK_TOP_IPE 135
+#define CLK_TOP_DPE 136
+#define CLK_TOP_CCU 137
+#define CLK_TOP_DSP3 138
+#define CLK_TOP_SENINF1 139
+#define CLK_TOP_SENINF2 140
+#define CLK_TOP_AUD_H 141
+#define CLK_TOP_CAMTG5 142
+#define CLK_TOP_TVDPLL_MAINPLL_D2_CK 143
+#define CLK_TOP_AD_OSC2_CK 144
+#define CLK_TOP_OSC2_D2 145
+#define CLK_TOP_OSC2_D3 146
+#define CLK_TOP_FMEM_466M_CK 147
+#define CLK_TOP_ADSPPLL_D4 148
+#define CLK_TOP_ADSPPLL_D5 149
+#define CLK_TOP_ADSPPLL_D6 150
+#define CLK_TOP_OSC_D10 151
+#define CLK_TOP_UNIVPLL_D3_D16 152
+#define CLK_TOP_NR_CLK 153
+
+/* APMIXED */
+#define CLK_APMIXED_ARMPLL_LL 1
+#define CLK_APMIXED_ARMPLL_BL 2
+#define CLK_APMIXED_ARMPLL_BB 3
+#define CLK_APMIXED_CCIPLL 4
+#define CLK_APMIXED_MAINPLL 5
+#define CLK_APMIXED_UNIV2PLL 6
+#define CLK_APMIXED_MSDCPLL 7
+#define CLK_APMIXED_ADSPPLL 8
+#define CLK_APMIXED_MMPLL 9
+#define CLK_APMIXED_MFGPLL 10
+#define CLK_APMIXED_TVDPLL 11
+#define CLK_APMIXED_APLL1 12
+#define CLK_APMIXED_APLL2 13
+#define CLK_APMIXED_SSUSB26M 14
+#define CLK_APMIXED_APPLL26M 15
+#define CLK_APMIXED_MIPIC0_26M 16
+#define CLK_APMIXED_MDPLLGP26M 17
+#define CLK_APMIXED_MM_F26M 18
+#define CLK_APMIXED_UFS26M 19
+#define CLK_APMIXED_MIPIC1_26M 20
+#define CLK_APMIXED_MEMPLL26M 21
+#define CLK_APMIXED_CLKSQ_LVPLL_26M 22
+#define CLK_APMIXED_MIPID0_26M 23
+#define CLK_APMIXED_MIPID1_26M 24
+#define CLK_APMIXED_NR_CLK 25
+
+/* CAMSYS */
+#define CLK_CAM_LARB10 1
+#define CLK_CAM_DFP_VAD 2
+#define CLK_CAM_LARB11 3
+#define CLK_CAM_LARB9 4
+#define CLK_CAM_CAM 5
+#define CLK_CAM_CAMTG 6
+#define CLK_CAM_SENINF 7
+#define CLK_CAM_CAMSV0 8
+#define CLK_CAM_CAMSV1 9
+#define CLK_CAM_CAMSV2 10
+#define CLK_CAM_CAMSV3 11
+#define CLK_CAM_CCU 12
+#define CLK_CAM_FAKE_ENG 13
+#define CLK_CAM_NR_CLK 14
+
+/* INFRA */
+#define CLK_INFRA_PMIC_TMR 1
+#define CLK_INFRA_PMIC_AP 2
+#define CLK_INFRA_PMIC_MD 3
+#define CLK_INFRA_PMIC_CONN 4
+#define CLK_INFRA_SCPSYS 5
+#define CLK_INFRA_SEJ 6
+#define CLK_INFRA_APXGPT 7
+#define CLK_INFRA_ICUSB 8
+#define CLK_INFRA_GCE 9
+#define CLK_INFRA_THERM 10
+#define CLK_INFRA_I2C0 11
+#define CLK_INFRA_I2C1 12
+#define CLK_INFRA_I2C2 13
+#define CLK_INFRA_I2C3 14
+#define CLK_INFRA_PWM_HCLK 15
+#define CLK_INFRA_PWM1 16
+#define CLK_INFRA_PWM2 17
+#define CLK_INFRA_PWM3 18
+#define CLK_INFRA_PWM4 19
+#define CLK_INFRA_PWM 20
+#define CLK_INFRA_UART0 21
+#define CLK_INFRA_UART1 22
+#define CLK_INFRA_UART2 23
+#define CLK_INFRA_UART3 24
+#define CLK_INFRA_GCE_26M 25
+#define CLK_INFRA_CQ_DMA_FPC 26
+#define CLK_INFRA_BTIF 27
+#define CLK_INFRA_SPI0 28
+#define CLK_INFRA_MSDC0 29
+#define CLK_INFRA_MSDC1 30
+#define CLK_INFRA_MSDC2 31
+#define CLK_INFRA_MSDC0_SCK 32
+#define CLK_INFRA_DVFSRC 33
+#define CLK_INFRA_GCPU 34
+#define CLK_INFRA_TRNG 35
+#define CLK_INFRA_AUXADC 36
+#define CLK_INFRA_CPUM 37
+#define CLK_INFRA_CCIF1_AP 38
+#define CLK_INFRA_CCIF1_MD 39
+#define CLK_INFRA_AUXADC_MD 40
+#define CLK_INFRA_MSDC1_SCK 41
+#define CLK_INFRA_MSDC2_SCK 42
+#define CLK_INFRA_AP_DMA 43
+#define CLK_INFRA_XIU 44
+#define CLK_INFRA_DEVICE_APC 45
+#define CLK_INFRA_CCIF_AP 46
+#define CLK_INFRA_DEBUGSYS 47
+#define CLK_INFRA_AUD 48
+#define CLK_INFRA_CCIF_MD 49
+#define CLK_INFRA_DXCC_SEC_CORE 50
+#define CLK_INFRA_DXCC_AO 51
+#define CLK_INFRA_DRAMC_F26M 52
+#define CLK_INFRA_IRTX 53
+#define CLK_INFRA_DISP_PWM 54
+#define CLK_INFRA_DPMAIF_CK 55
+#define CLK_INFRA_AUD_26M_BCLK 56
+#define CLK_INFRA_SPI1 57
+#define CLK_INFRA_I2C4 58
+#define CLK_INFRA_MODEM_TEMP_SHARE 59
+#define CLK_INFRA_SPI2 60
+#define CLK_INFRA_SPI3 61
+#define CLK_INFRA_UNIPRO_SCK 62
+#define CLK_INFRA_UNIPRO_TICK 63
+#define CLK_INFRA_UFS_MP_SAP_BCLK 64
+#define CLK_INFRA_MD32_BCLK 65
+#define CLK_INFRA_SSPM 66
+#define CLK_INFRA_UNIPRO_MBIST 67
+#define CLK_INFRA_SSPM_BUS_HCLK 68
+#define CLK_INFRA_I2C5 69
+#define CLK_INFRA_I2C5_ARBITER 70
+#define CLK_INFRA_I2C5_IMM 71
+#define CLK_INFRA_I2C1_ARBITER 72
+#define CLK_INFRA_I2C1_IMM 73
+#define CLK_INFRA_I2C2_ARBITER 74
+#define CLK_INFRA_I2C2_IMM 75
+#define CLK_INFRA_SPI4 76
+#define CLK_INFRA_SPI5 77
+#define CLK_INFRA_CQ_DMA 78
+#define CLK_INFRA_UFS 79
+#define CLK_INFRA_AES_UFSFDE 80
+#define CLK_INFRA_UFS_TICK 81
+#define CLK_INFRA_MSDC0_SELF 82
+#define CLK_INFRA_MSDC1_SELF 83
+#define CLK_INFRA_MSDC2_SELF 84
+#define CLK_INFRA_SSPM_26M_SELF 85
+#define CLK_INFRA_SSPM_32K_SELF 86
+#define CLK_INFRA_UFS_AXI 87
+#define CLK_INFRA_I2C6 88
+#define CLK_INFRA_AP_MSDC0 89
+#define CLK_INFRA_MD_MSDC0 90
+#define CLK_INFRA_USB 91
+#define CLK_INFRA_DEVMPU_BCLK 92
+#define CLK_INFRA_CCIF2_AP 93
+#define CLK_INFRA_CCIF2_MD 94
+#define CLK_INFRA_CCIF3_AP 95
+#define CLK_INFRA_CCIF3_MD 96
+#define CLK_INFRA_SEJ_F13M 97
+#define CLK_INFRA_AES_BCLK 98
+#define CLK_INFRA_I2C7 99
+#define CLK_INFRA_I2C8 100
+#define CLK_INFRA_FBIST2FPC 101
+#define CLK_INFRA_CCIF4_AP 102
+#define CLK_INFRA_CCIF4_MD 103
+#define CLK_INFRA_FADSP 104
+#define CLK_INFRA_SSUSB_XHCI 105
+#define CLK_INFRA_SPI6 106
+#define CLK_INFRA_SPI7 107
+#define CLK_INFRA_NR_CLK 108
+
+/* MFGCFG */
+#define CLK_MFGCFG_BG3D 1
+#define CLK_MFGCFG_NR_CLK 2
+
+/* IMG */
+#define CLK_IMG_WPE_A 1
+#define CLK_IMG_MFB 2
+#define CLK_IMG_DIP 3
+#define CLK_IMG_LARB6 4
+#define CLK_IMG_LARB5 5
+#define CLK_IMG_NR_CLK 6
+
+/* IPE */
+#define CLK_IPE_LARB7 1
+#define CLK_IPE_LARB8 2
+#define CLK_IPE_SMI_SUBCOM 3
+#define CLK_IPE_FD 4
+#define CLK_IPE_FE 5
+#define CLK_IPE_RSC 6
+#define CLK_IPE_DPE 7
+#define CLK_IPE_NR_CLK 8
+
+/* MM_CONFIG */
+#define CLK_MM_SMI_COMMON 1
+#define CLK_MM_SMI_LARB0 2
+#define CLK_MM_SMI_LARB1 3
+#define CLK_MM_GALS_COMM0 4
+#define CLK_MM_GALS_COMM1 5
+#define CLK_MM_GALS_CCU2MM 6
+#define CLK_MM_GALS_IPU12MM 7
+#define CLK_MM_GALS_IMG2MM 8
+#define CLK_MM_GALS_CAM2MM 9
+#define CLK_MM_GALS_IPU2MM 10
+#define CLK_MM_MDP_DL_TXCK 11
+#define CLK_MM_IPU_DL_TXCK 12
+#define CLK_MM_MDP_RDMA0 13
+#define CLK_MM_MDP_RDMA1 14
+#define CLK_MM_MDP_RSZ0 15
+#define CLK_MM_MDP_RSZ1 16
+#define CLK_MM_MDP_TDSHP 17
+#define CLK_MM_MDP_WROT0 18
+#define CLK_MM_FAKE_ENG 19
+#define CLK_MM_DISP_OVL0 20
+#define CLK_MM_DISP_OVL0_2L 21
+#define CLK_MM_DISP_OVL1_2L 22
+#define CLK_MM_DISP_RDMA0 23
+#define CLK_MM_DISP_RDMA1 24
+#define CLK_MM_DISP_WDMA0 25
+#define CLK_MM_DISP_COLOR0 26
+#define CLK_MM_DISP_CCORR0 27
+#define CLK_MM_DISP_AAL0 28
+#define CLK_MM_DISP_GAMMA0 29
+#define CLK_MM_DISP_DITHER0 30
+#define CLK_MM_DISP_SPLIT 31
+#define CLK_MM_DSI0_MM_CK 32
+#define CLK_MM_DSI0_IF_CK 33
+#define CLK_MM_DPI_MM_CK 34
+#define CLK_MM_DPI_IF_CK 35
+#define CLK_MM_FAKE_ENG2 36
+#define CLK_MM_MDP_DL_RX_CK 37
+#define CLK_MM_IPU_DL_RX_CK 38
+#define CLK_MM_26M 39
+#define CLK_MM_MM_R2Y 40
+#define CLK_MM_DISP_RSZ 41
+#define CLK_MM_MDP_WDMA0 42
+#define CLK_MM_MDP_AAL 43
+#define CLK_MM_MDP_HDR 44
+#define CLK_MM_DBI_MM_CK 45
+#define CLK_MM_DBI_IF_CK 46
+#define CLK_MM_MDP_WROT1 47
+#define CLK_MM_DISP_POSTMASK0 48
+#define CLK_MM_DISP_HRT_BW 49
+#define CLK_MM_DISP_OVL_FBDC 50
+#define CLK_MM_NR_CLK 51
+
+/* VDEC_GCON */
+#define CLK_VDEC_VDEC 1
+#define CLK_VDEC_LARB1 2
+#define CLK_VDEC_GCON_NR_CLK 3
+
+/* VENC_GCON */
+#define CLK_VENC_GCON_LARB 1
+#define CLK_VENC_GCON_VENC 2
+#define CLK_VENC_GCON_JPGENC 3
+#define CLK_VENC_GCON_GALS 4
+#define CLK_VENC_GCON_NR_CLK 5
+
+/* AUD */
+#define CLK_AUD_AFE 1
+#define CLK_AUD_22M 2
+#define CLK_AUD_24M 3
+#define CLK_AUD_APLL2_TUNER 4
+#define CLK_AUD_APLL_TUNER 5
+#define CLK_AUD_TDM 6
+#define CLK_AUD_ADC 7
+#define CLK_AUD_DAC 8
+#define CLK_AUD_DAC_PREDIS 9
+#define CLK_AUD_TML 10
+#define CLK_AUD_NLE 11
+#define CLK_AUD_I2S1_BCLK_SW 12
+#define CLK_AUD_I2S2_BCLK_SW 13
+#define CLK_AUD_I2S3_BCLK_SW 14
+#define CLK_AUD_I2S4_BCLK_SW 15
+#define CLK_AUD_I2S5_BCLK_SW 16
+#define CLK_AUD_CONN_I2S_ASRC 17
+#define CLK_AUD_GENERAL1_ASRC 18
+#define CLK_AUD_GENERAL2_ASRC 19
+#define CLK_AUD_DAC_HIRES 20
+#define CLK_AUD_PDN_ADDA6_ADC 21
+#define CLK_AUD_ADC_HIRES 22
+#define CLK_AUD_ADC_HIRES_TML 23
+#define CLK_AUD_ADDA6_ADC_HIRES 24
+#define CLK_AUD_3RD_DAC 25
+#define CLK_AUD_3RD_DAC_PREDIS 26
+#define CLK_AUD_3RD_DAC_TML 27
+#define CLK_AUD_3RD_DAC_HIRES 28
+#define CLK_AUD_NR_CLK 29
+
+#endif /* _DT_BINDINGS_CLK_MT6779_H */
diff --git a/include/dt-bindings/clock/mt8183-clk.h b/include/dt-bindings/clock/mt8183-clk.h
index 0046506eb24c..a7b470b0ec8a 100644
--- a/include/dt-bindings/clock/mt8183-clk.h
+++ b/include/dt-bindings/clock/mt8183-clk.h
@@ -284,6 +284,10 @@
#define CLK_INFRA_FBIST2FPC 100
#define CLK_INFRA_NR_CLK 101
+/* PERICFG */
+#define CLK_PERI_AXI 0
+#define CLK_PERI_NR_CLK 1
+
/* MFGCFG */
#define CLK_MFG_BG3D 0
#define CLK_MFG_NR_CLK 1
diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h
index f3283957f48d..e5411938983c 100644
--- a/include/dt-bindings/clock/omap5.h
+++ b/include/dt-bindings/clock/omap5.h
@@ -89,6 +89,9 @@
/* dss clocks */
#define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+/* gpu clocks */
+#define OMAP5_GPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
/* l3init clocks */
#define OMAP5_MMC1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
#define OMAP5_MMC2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h
index 2cd62c98561f..bc3051543347 100644
--- a/include/dt-bindings/clock/qcom,gcc-qcs404.h
+++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h
@@ -146,6 +146,8 @@
#define GCC_MDP_TBU_CLK 138
#define GCC_QDSS_DAP_CLK 139
#define GCC_DCC_XO_CLK 140
+#define GCC_WCSS_Q6_AHB_CLK 141
+#define GCC_WCSS_Q6_AXIM_CLK 142
#define GCC_CDSP_CFG_AHB_CLK 143
#define GCC_BIMC_CDSP_CLK 144
#define GCC_CDSP_TBU_CLK 145
@@ -173,5 +175,6 @@
#define GCC_PCIE_0_CORE_STICKY_ARES 19
#define GCC_PCIE_0_SLEEP_ARES 20
#define GCC_PCIE_0_PIPE_ARES 21
+#define GCC_WDSP_RESTART 22
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8150.h b/include/dt-bindings/clock/qcom,gcc-sm8150.h
new file mode 100644
index 000000000000..90d60ef94c64
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm8150.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8150_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8150_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 3
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 4
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 5
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 6
+#define GCC_BOOT_ROM_AHB_CLK 7
+#define GCC_CAMERA_AHB_CLK 8
+#define GCC_CAMERA_HF_AXI_CLK 9
+#define GCC_CAMERA_SF_AXI_CLK 10
+#define GCC_CAMERA_XO_CLK 11
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
+#define GCC_CPUSS_AHB_CLK 14
+#define GCC_CPUSS_AHB_CLK_SRC 15
+#define GCC_CPUSS_DVM_BUS_CLK 16
+#define GCC_CPUSS_GNOC_CLK 17
+#define GCC_CPUSS_RBCPR_CLK 18
+#define GCC_DDRSS_GPU_AXI_CLK 19
+#define GCC_DISP_AHB_CLK 20
+#define GCC_DISP_HF_AXI_CLK 21
+#define GCC_DISP_SF_AXI_CLK 22
+#define GCC_DISP_XO_CLK 23
+#define GCC_EMAC_AXI_CLK 24
+#define GCC_EMAC_PTP_CLK 25
+#define GCC_EMAC_PTP_CLK_SRC 26
+#define GCC_EMAC_RGMII_CLK 27
+#define GCC_EMAC_RGMII_CLK_SRC 28
+#define GCC_EMAC_SLV_AHB_CLK 29
+#define GCC_GP1_CLK 30
+#define GCC_GP1_CLK_SRC 31
+#define GCC_GP2_CLK 32
+#define GCC_GP2_CLK_SRC 33
+#define GCC_GP3_CLK 34
+#define GCC_GP3_CLK_SRC 35
+#define GCC_GPU_CFG_AHB_CLK 36
+#define GCC_GPU_GPLL0_CLK_SRC 37
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 38
+#define GCC_GPU_IREF_CLK 39
+#define GCC_GPU_MEMNOC_GFX_CLK 40
+#define GCC_GPU_SNOC_DVM_GFX_CLK 41
+#define GCC_NPU_AT_CLK 42
+#define GCC_NPU_AXI_CLK 43
+#define GCC_NPU_CFG_AHB_CLK 44
+#define GCC_NPU_GPLL0_CLK_SRC 45
+#define GCC_NPU_GPLL0_DIV_CLK_SRC 46
+#define GCC_NPU_TRIG_CLK 47
+#define GCC_PCIE0_PHY_REFGEN_CLK 48
+#define GCC_PCIE1_PHY_REFGEN_CLK 49
+#define GCC_PCIE_0_AUX_CLK 50
+#define GCC_PCIE_0_AUX_CLK_SRC 51
+#define GCC_PCIE_0_CFG_AHB_CLK 52
+#define GCC_PCIE_0_CLKREF_CLK 53
+#define GCC_PCIE_0_MSTR_AXI_CLK 54
+#define GCC_PCIE_0_PIPE_CLK 55
+#define GCC_PCIE_0_SLV_AXI_CLK 56
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57
+#define GCC_PCIE_1_AUX_CLK 58
+#define GCC_PCIE_1_AUX_CLK_SRC 59
+#define GCC_PCIE_1_CFG_AHB_CLK 60
+#define GCC_PCIE_1_CLKREF_CLK 61
+#define GCC_PCIE_1_MSTR_AXI_CLK 62
+#define GCC_PCIE_1_PIPE_CLK 63
+#define GCC_PCIE_1_SLV_AXI_CLK 64
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 65
+#define GCC_PCIE_PHY_AUX_CLK 66
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 67
+#define GCC_PDM2_CLK 68
+#define GCC_PDM2_CLK_SRC 69
+#define GCC_PDM_AHB_CLK 70
+#define GCC_PDM_XO4_CLK 71
+#define GCC_PRNG_AHB_CLK 72
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 73
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 74
+#define GCC_QMIP_DISP_AHB_CLK 75
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 76
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 77
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 78
+#define GCC_QSPI_CORE_CLK 79
+#define GCC_QSPI_CORE_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S0_CLK 81
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 82
+#define GCC_QUPV3_WRAP0_S1_CLK 83
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 84
+#define GCC_QUPV3_WRAP0_S2_CLK 85
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 86
+#define GCC_QUPV3_WRAP0_S3_CLK 87
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 88
+#define GCC_QUPV3_WRAP0_S4_CLK 89
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 90
+#define GCC_QUPV3_WRAP0_S5_CLK 91
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 92
+#define GCC_QUPV3_WRAP0_S6_CLK 93
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 94
+#define GCC_QUPV3_WRAP0_S7_CLK 95
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S0_CLK 97
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S1_CLK 99
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S2_CLK 101
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_S3_CLK 103
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 104
+#define GCC_QUPV3_WRAP1_S4_CLK 105
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_S5_CLK 107
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 108
+#define GCC_QUPV3_WRAP2_S0_CLK 109
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 110
+#define GCC_QUPV3_WRAP2_S1_CLK 111
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_S2_CLK 113
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 114
+#define GCC_QUPV3_WRAP2_S3_CLK 115
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 116
+#define GCC_QUPV3_WRAP2_S4_CLK 117
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 118
+#define GCC_QUPV3_WRAP2_S5_CLK 119
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 120
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 121
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 122
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 123
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 124
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 125
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 126
+#define GCC_SDCC2_AHB_CLK 127
+#define GCC_SDCC2_APPS_CLK 128
+#define GCC_SDCC2_APPS_CLK_SRC 129
+#define GCC_SDCC4_AHB_CLK 130
+#define GCC_SDCC4_APPS_CLK 131
+#define GCC_SDCC4_APPS_CLK_SRC 132
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 133
+#define GCC_TSIF_AHB_CLK 134
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 135
+#define GCC_TSIF_REF_CLK 136
+#define GCC_TSIF_REF_CLK_SRC 137
+#define GCC_UFS_CARD_AHB_CLK 138
+#define GCC_UFS_CARD_AXI_CLK 139
+#define GCC_UFS_CARD_AXI_CLK_SRC 140
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 141
+#define GCC_UFS_CARD_CLKREF_CLK 142
+#define GCC_UFS_CARD_ICE_CORE_CLK 143
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 144
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 145
+#define GCC_UFS_CARD_PHY_AUX_CLK 146
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 147
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 148
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 149
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 150
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 151
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 152
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 153
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 154
+#define GCC_UFS_MEM_CLKREF_CLK 155
+#define GCC_UFS_PHY_AHB_CLK 156
+#define GCC_UFS_PHY_AXI_CLK 157
+#define GCC_UFS_PHY_AXI_CLK_SRC 158
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 159
+#define GCC_UFS_PHY_ICE_CORE_CLK 160
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 161
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 162
+#define GCC_UFS_PHY_PHY_AUX_CLK 163
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 164
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 165
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 166
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 167
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 169
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 170
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 171
+#define GCC_USB30_PRIM_MASTER_CLK 172
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 173
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 174
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 175
+#define GCC_USB30_PRIM_SLEEP_CLK 176
+#define GCC_USB30_SEC_MASTER_CLK 177
+#define GCC_USB30_SEC_MASTER_CLK_SRC 178
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 179
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 180
+#define GCC_USB30_SEC_SLEEP_CLK 181
+#define GCC_USB3_PRIM_CLKREF_CLK 182
+#define GCC_USB3_PRIM_PHY_AUX_CLK 183
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 184
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 185
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 186
+#define GCC_USB3_SEC_CLKREF_CLK 187
+#define GCC_USB3_SEC_PHY_AUX_CLK 188
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 189
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 190
+#define GCC_USB3_SEC_PHY_PIPE_CLK 191
+#define GCC_VIDEO_AHB_CLK 192
+#define GCC_VIDEO_AXI0_CLK 193
+#define GCC_VIDEO_AXI1_CLK 194
+#define GCC_VIDEO_AXIC_CLK 195
+#define GCC_VIDEO_XO_CLK 196
+#define GPLL0 197
+#define GPLL0_OUT_EVEN 198
+#define GPLL7 199
+#define GPLL9 200
+
+/* Reset clocks */
+#define GCC_EMAC_BCR 0
+#define GCC_GPU_BCR 1
+#define GCC_MMSS_BCR 2
+#define GCC_NPU_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_PHY_BCR 5
+#define GCC_PCIE_1_BCR 6
+#define GCC_PCIE_1_PHY_BCR 7
+#define GCC_PCIE_PHY_BCR 8
+#define GCC_PDM_BCR 9
+#define GCC_PRNG_BCR 10
+#define GCC_QSPI_BCR 11
+#define GCC_QUPV3_WRAPPER_0_BCR 12
+#define GCC_QUPV3_WRAPPER_1_BCR 13
+#define GCC_QUPV3_WRAPPER_2_BCR 14
+#define GCC_QUSB2PHY_PRIM_BCR 15
+#define GCC_QUSB2PHY_SEC_BCR 16
+#define GCC_USB3_PHY_PRIM_BCR 17
+#define GCC_USB3_DP_PHY_PRIM_BCR 18
+#define GCC_USB3_PHY_SEC_BCR 19
+#define GCC_USB3PHY_PHY_SEC_BCR 20
+#define GCC_SDCC2_BCR 21
+#define GCC_SDCC4_BCR 22
+#define GCC_TSIF_BCR 23
+#define GCC_UFS_CARD_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB30_SEC_BCR 27
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 28
+
+#endif
diff --git a/include/dt-bindings/clock/rk3308-cru.h b/include/dt-bindings/clock/rk3308-cru.h
new file mode 100644
index 000000000000..d97840f9ee2e
--- /dev/null
+++ b/include/dt-bindings/clock/rk3308-cru.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3308_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3308_H
+
+/* core clocks */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_VPLL0 3
+#define PLL_VPLL1 4
+#define ARMCLK 5
+
+/* sclk (special clocks) */
+#define USB480M 14
+#define SCLK_RTC32K 15
+#define SCLK_PVTM_CORE 16
+#define SCLK_UART0 17
+#define SCLK_UART1 18
+#define SCLK_UART2 19
+#define SCLK_UART3 20
+#define SCLK_UART4 21
+#define SCLK_I2C0 22
+#define SCLK_I2C1 23
+#define SCLK_I2C2 24
+#define SCLK_I2C3 25
+#define SCLK_PWM0 26
+#define SCLK_SPI0 27
+#define SCLK_SPI1 28
+#define SCLK_SPI2 29
+#define SCLK_TIMER0 30
+#define SCLK_TIMER1 31
+#define SCLK_TIMER2 32
+#define SCLK_TIMER3 33
+#define SCLK_TIMER4 34
+#define SCLK_TIMER5 35
+#define SCLK_TSADC 36
+#define SCLK_SARADC 37
+#define SCLK_OTP 38
+#define SCLK_OTP_USR 39
+#define SCLK_CPU_BOOST 40
+#define SCLK_CRYPTO 41
+#define SCLK_CRYPTO_APK 42
+#define SCLK_NANDC_DIV 43
+#define SCLK_NANDC_DIV50 44
+#define SCLK_NANDC 45
+#define SCLK_SDMMC_DIV 46
+#define SCLK_SDMMC_DIV50 47
+#define SCLK_SDMMC 48
+#define SCLK_SDMMC_DRV 49
+#define SCLK_SDMMC_SAMPLE 50
+#define SCLK_SDIO_DIV 51
+#define SCLK_SDIO_DIV50 52
+#define SCLK_SDIO 53
+#define SCLK_SDIO_DRV 54
+#define SCLK_SDIO_SAMPLE 55
+#define SCLK_EMMC_DIV 56
+#define SCLK_EMMC_DIV50 57
+#define SCLK_EMMC 58
+#define SCLK_EMMC_DRV 59
+#define SCLK_EMMC_SAMPLE 60
+#define SCLK_SFC 61
+#define SCLK_OTG_ADP 62
+#define SCLK_MAC_SRC 63
+#define SCLK_MAC 64
+#define SCLK_MAC_REF 65
+#define SCLK_MAC_RX_TX 66
+#define SCLK_MAC_RMII 67
+#define SCLK_DDR_MON_TIMER 68
+#define SCLK_DDR_MON 69
+#define SCLK_DDRCLK 70
+#define SCLK_PMU 71
+#define SCLK_USBPHY_REF 72
+#define SCLK_WIFI 73
+#define SCLK_PVTM_PMU 74
+#define SCLK_PDM 75
+#define SCLK_I2S0_8CH_TX 76
+#define SCLK_I2S0_8CH_TX_OUT 77
+#define SCLK_I2S0_8CH_RX 78
+#define SCLK_I2S0_8CH_RX_OUT 79
+#define SCLK_I2S1_8CH_TX 80
+#define SCLK_I2S1_8CH_TX_OUT 81
+#define SCLK_I2S1_8CH_RX 82
+#define SCLK_I2S1_8CH_RX_OUT 83
+#define SCLK_I2S2_8CH_TX 84
+#define SCLK_I2S2_8CH_TX_OUT 85
+#define SCLK_I2S2_8CH_RX 86
+#define SCLK_I2S2_8CH_RX_OUT 87
+#define SCLK_I2S3_8CH_TX 88
+#define SCLK_I2S3_8CH_TX_OUT 89
+#define SCLK_I2S3_8CH_RX 90
+#define SCLK_I2S3_8CH_RX_OUT 91
+#define SCLK_I2S0_2CH 92
+#define SCLK_I2S0_2CH_OUT 93
+#define SCLK_I2S1_2CH 94
+#define SCLK_I2S1_2CH_OUT 95
+#define SCLK_SPDIF_TX_DIV 96
+#define SCLK_SPDIF_TX_DIV50 97
+#define SCLK_SPDIF_TX 98
+#define SCLK_SPDIF_RX_DIV 99
+#define SCLK_SPDIF_RX_DIV50 100
+#define SCLK_SPDIF_RX 101
+#define SCLK_I2S0_8CH_TX_MUX 102
+#define SCLK_I2S0_8CH_RX_MUX 103
+#define SCLK_I2S1_8CH_TX_MUX 104
+#define SCLK_I2S1_8CH_RX_MUX 105
+#define SCLK_I2S2_8CH_TX_MUX 106
+#define SCLK_I2S2_8CH_RX_MUX 107
+#define SCLK_I2S3_8CH_TX_MUX 108
+#define SCLK_I2S3_8CH_RX_MUX 109
+#define SCLK_I2S0_8CH_TX_SRC 110
+#define SCLK_I2S0_8CH_RX_SRC 111
+#define SCLK_I2S1_8CH_TX_SRC 112
+#define SCLK_I2S1_8CH_RX_SRC 113
+#define SCLK_I2S2_8CH_TX_SRC 114
+#define SCLK_I2S2_8CH_RX_SRC 115
+#define SCLK_I2S3_8CH_TX_SRC 116
+#define SCLK_I2S3_8CH_RX_SRC 117
+#define SCLK_I2S0_2CH_SRC 118
+#define SCLK_I2S1_2CH_SRC 119
+#define SCLK_PWM1 120
+#define SCLK_PWM2 121
+#define SCLK_OWIRE 122
+
+/* dclk */
+#define DCLK_VOP 125
+
+/* aclk */
+#define ACLK_BUS_SRC 130
+#define ACLK_BUS 131
+#define ACLK_PERI_SRC 132
+#define ACLK_PERI 133
+#define ACLK_MAC 134
+#define ACLK_CRYPTO 135
+#define ACLK_VOP 136
+#define ACLK_GIC 137
+#define ACLK_DMAC0 138
+#define ACLK_DMAC1 139
+
+/* hclk */
+#define HCLK_BUS 150
+#define HCLK_PERI 151
+#define HCLK_AUDIO 152
+#define HCLK_NANDC 153
+#define HCLK_SDMMC 154
+#define HCLK_SDIO 155
+#define HCLK_EMMC 156
+#define HCLK_SFC 157
+#define HCLK_OTG 158
+#define HCLK_HOST 159
+#define HCLK_HOST_ARB 160
+#define HCLK_PDM 161
+#define HCLK_SPDIFTX 162
+#define HCLK_SPDIFRX 163
+#define HCLK_I2S0_8CH 164
+#define HCLK_I2S1_8CH 165
+#define HCLK_I2S2_8CH 166
+#define HCLK_I2S3_8CH 167
+#define HCLK_I2S0_2CH 168
+#define HCLK_I2S1_2CH 169
+#define HCLK_VAD 170
+#define HCLK_CRYPTO 171
+#define HCLK_VOP 172
+
+/* pclk */
+#define PCLK_BUS 190
+#define PCLK_DDR 191
+#define PCLK_PERI 192
+#define PCLK_PMU 193
+#define PCLK_AUDIO 194
+#define PCLK_MAC 195
+#define PCLK_ACODEC 196
+#define PCLK_UART0 197
+#define PCLK_UART1 198
+#define PCLK_UART2 199
+#define PCLK_UART3 200
+#define PCLK_UART4 201
+#define PCLK_I2C0 202
+#define PCLK_I2C1 203
+#define PCLK_I2C2 204
+#define PCLK_I2C3 205
+#define PCLK_PWM0 206
+#define PCLK_SPI0 207
+#define PCLK_SPI1 208
+#define PCLK_SPI2 209
+#define PCLK_SARADC 210
+#define PCLK_TSADC 211
+#define PCLK_TIMER 212
+#define PCLK_OTP_NS 213
+#define PCLK_WDT 214
+#define PCLK_GPIO0 215
+#define PCLK_GPIO1 216
+#define PCLK_GPIO2 217
+#define PCLK_GPIO3 218
+#define PCLK_GPIO4 219
+#define PCLK_SGRF 220
+#define PCLK_GRF 221
+#define PCLK_USBSD_DET 222
+#define PCLK_DDR_UPCTL 223
+#define PCLK_DDR_MON 224
+#define PCLK_DDRPHY 225
+#define PCLK_DDR_STDBY 226
+#define PCLK_USB_GRF 227
+#define PCLK_CRU 228
+#define PCLK_OTP_PHY 229
+#define PCLK_CPU_BOOST 230
+#define PCLK_PWM1 231
+#define PCLK_PWM2 232
+#define PCLK_CAN 233
+#define PCLK_OWIRE 234
+
+#define CLK_NR_CLKS (PCLK_OWIRE + 1)
+
+/* soft-reset indices */
+
+/* cru_softrst_con0 */
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_CORE0_DBG 8
+#define SRST_CORE1_DBG 9
+#define SRST_CORE2_DBG 10
+#define SRST_CORE3_DBG 11
+#define SRST_TOPDBG 12
+#define SRST_CORE_NOC 13
+#define SRST_STRC_A 14
+#define SRST_L2C 15
+
+/* cru_softrst_con1 */
+#define SRST_DAP 16
+#define SRST_CORE_PVTM 17
+#define SRST_CORE_PRF 18
+#define SRST_CORE_GRF 19
+#define SRST_DDRUPCTL 20
+#define SRST_DDRUPCTL_P 22
+#define SRST_MSCH 23
+#define SRST_DDRMON_P 25
+#define SRST_DDRSTDBY_P 26
+#define SRST_DDRSTDBY 27
+#define SRST_DDRPHY 28
+#define SRST_DDRPHY_DIV 29
+#define SRST_DDRPHY_P 30
+
+/* cru_softrst_con2 */
+#define SRST_BUS_NIU_H 32
+#define SRST_USB_NIU_P 33
+#define SRST_CRYPTO_A 34
+#define SRST_CRYPTO_H 35
+#define SRST_CRYPTO 36
+#define SRST_CRYPTO_APK 37
+#define SRST_VOP_A 38
+#define SRST_VOP_H 39
+#define SRST_VOP_D 40
+#define SRST_INTMEM_A 41
+#define SRST_ROM_H 42
+#define SRST_GIC_A 43
+#define SRST_UART0_P 44
+#define SRST_UART0 45
+#define SRST_UART1_P 46
+#define SRST_UART1 47
+
+/* cru_softrst_con3 */
+#define SRST_UART2_P 48
+#define SRST_UART2 49
+#define SRST_UART3_P 50
+#define SRST_UART3 51
+#define SRST_UART4_P 52
+#define SRST_UART4 53
+#define SRST_I2C0_P 54
+#define SRST_I2C0 55
+#define SRST_I2C1_P 56
+#define SRST_I2C1 57
+#define SRST_I2C2_P 58
+#define SRST_I2C2 59
+#define SRST_I2C3_P 60
+#define SRST_I2C3 61
+#define SRST_PWM0_P 62
+#define SRST_PWM0 63
+
+/* cru_softrst_con4 */
+#define SRST_SPI0_P 64
+#define SRST_SPI0 65
+#define SRST_SPI1_P 66
+#define SRST_SPI1 67
+#define SRST_SPI2_P 68
+#define SRST_SPI2 69
+#define SRST_SARADC_P 70
+#define SRST_TSADC_P 71
+#define SRST_TSADC 72
+#define SRST_TIMER0_P 73
+#define SRST_TIMER0 74
+#define SRST_TIMER1 75
+#define SRST_TIMER2 76
+#define SRST_TIMER3 77
+#define SRST_TIMER4 78
+#define SRST_TIMER5 79
+
+/* cru_softrst_con5 */
+#define SRST_OTP_NS_P 80
+#define SRST_OTP_NS_SBPI 81
+#define SRST_OTP_NS_USR 82
+#define SRST_OTP_PHY_P 83
+#define SRST_OTP_PHY 84
+#define SRST_GPIO0_P 86
+#define SRST_GPIO1_P 87
+#define SRST_GPIO2_P 88
+#define SRST_GPIO3_P 89
+#define SRST_GPIO4_P 90
+#define SRST_GRF_P 91
+#define SRST_USBSD_DET_P 92
+#define SRST_PMU 93
+#define SRST_PMU_PVTM 94
+#define SRST_USB_GRF_P 95
+
+/* cru_softrst_con6 */
+#define SRST_CPU_BOOST 96
+#define SRST_CPU_BOOST_P 97
+#define SRST_PWM1_P 98
+#define SRST_PWM1 99
+#define SRST_PWM2_P 100
+#define SRST_PWM2 101
+#define SRST_PERI_NIU_A 104
+#define SRST_PERI_NIU_H 105
+#define SRST_PERI_NIU_p 106
+#define SRST_USB2OTG_H 107
+#define SRST_USB2OTG 108
+#define SRST_USB2OTG_ADP 109
+#define SRST_USB2HOST_H 110
+#define SRST_USB2HOST_ARB_H 111
+
+/* cru_softrst_con7 */
+#define SRST_USB2HOST_AUX_H 112
+#define SRST_USB2HOST_EHCI 113
+#define SRST_USB2HOST 114
+#define SRST_USBPHYPOR 115
+#define SRST_UTMI0 116
+#define SRST_UTMI1 117
+#define SRST_SDIO_H 118
+#define SRST_EMMC_H 119
+#define SRST_SFC_H 120
+#define SRST_SFC 121
+#define SRST_SD_H 122
+#define SRST_NANDC_H 123
+#define SRST_NANDC_N 124
+#define SRST_MAC_A 125
+#define SRST_CAN_P 126
+#define SRST_OWIRE_P 127
+
+/* cru_softrst_con8 */
+#define SRST_AUDIO_NIU_H 128
+#define SRST_AUDIO_NIU_P 129
+#define SRST_PDM_H 130
+#define SRST_PDM_M 131
+#define SRST_SPDIFTX_H 132
+#define SRST_SPDIFTX_M 133
+#define SRST_SPDIFRX_H 134
+#define SRST_SPDIFRX_M 135
+#define SRST_I2S0_8CH_H 136
+#define SRST_I2S0_8CH_TX_M 137
+#define SRST_I2S0_8CH_RX_M 138
+#define SRST_I2S1_8CH_H 139
+#define SRST_I2S1_8CH_TX_M 140
+#define SRST_I2S1_8CH_RX_M 141
+#define SRST_I2S2_8CH_H 142
+#define SRST_I2S2_8CH_TX_M 143
+
+/* cru_softrst_con9 */
+#define SRST_I2S2_8CH_RX_M 144
+#define SRST_I2S3_8CH_H 145
+#define SRST_I2S3_8CH_TX_M 146
+#define SRST_I2S3_8CH_RX_M 147
+#define SRST_I2S0_2CH_H 148
+#define SRST_I2S0_2CH_M 149
+#define SRST_I2S1_2CH_H 150
+#define SRST_I2S1_2CH_M 151
+#define SRST_VAD_H 152
+#define SRST_ACODEC_P 153
+
+#endif
diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h
index c0d5d5599c87..014ac6123d17 100644
--- a/include/dt-bindings/clock/sun8i-v3s-ccu.h
+++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h
@@ -104,4 +104,8 @@
#define CLK_MIPI_CSI 73
+/* Clocks not available on V3s */
+#define CLK_BUS_I2S0 75
+#define CLK_I2S0 76
+
#endif /* _DT_BINDINGS_CLK_SUN8I_V3S_H_ */
diff --git a/include/dt-bindings/gce/mt8183-gce.h b/include/dt-bindings/gce/mt8183-gce.h
new file mode 100644
index 000000000000..29c967476f73
--- /dev/null
+++ b/include/dt-bindings/gce/mt8183-gce.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Bibby Hsieh <bibby.hsieh@mediatek.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8183_H
+#define _DT_BINDINGS_GCE_MT8183_H
+
+#define CMDQ_NO_TIMEOUT 0xffffffff
+
+/* GCE HW thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_HIGHEST 1
+
+/* GCE SUBSYS */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1502XXXX 4
+#define SUBSYS_1880XXXX 5
+#define SUBSYS_1881XXXX 6
+#define SUBSYS_1882XXXX 7
+#define SUBSYS_1883XXXX 8
+#define SUBSYS_1884XXXX 9
+#define SUBSYS_1000XXXX 10
+#define SUBSYS_1001XXXX 11
+#define SUBSYS_1002XXXX 12
+#define SUBSYS_1003XXXX 13
+#define SUBSYS_1004XXXX 14
+#define SUBSYS_1005XXXX 15
+#define SUBSYS_1020XXXX 16
+#define SUBSYS_1028XXXX 17
+#define SUBSYS_1700XXXX 18
+#define SUBSYS_1701XXXX 19
+#define SUBSYS_1702XXXX 20
+#define SUBSYS_1703XXXX 21
+#define SUBSYS_1800XXXX 22
+#define SUBSYS_1801XXXX 23
+#define SUBSYS_1802XXXX 24
+#define SUBSYS_1804XXXX 25
+#define SUBSYS_1805XXXX 26
+#define SUBSYS_1808XXXX 27
+#define SUBSYS_180aXXXX 28
+#define SUBSYS_180bXXXX 29
+
+#define CMDQ_EVENT_DISP_RDMA0_SOF 0
+#define CMDQ_EVENT_DISP_RDMA1_SOF 1
+#define CMDQ_EVENT_MDP_RDMA0_SOF 2
+#define CMDQ_EVENT_MDP_RSZ0_SOF 4
+#define CMDQ_EVENT_MDP_RSZ1_SOF 5
+#define CMDQ_EVENT_MDP_TDSHP_SOF 6
+#define CMDQ_EVENT_MDP_WROT0_SOF 7
+#define CMDQ_EVENT_MDP_WDMA0_SOF 8
+#define CMDQ_EVENT_DISP_OVL0_SOF 9
+#define CMDQ_EVENT_DISP_OVL0_2L_SOF 10
+#define CMDQ_EVENT_DISP_OVL1_2L_SOF 11
+#define CMDQ_EVENT_DISP_WDMA0_SOF 12
+#define CMDQ_EVENT_DISP_COLOR0_SOF 13
+#define CMDQ_EVENT_DISP_CCORR0_SOF 14
+#define CMDQ_EVENT_DISP_AAL0_SOF 15
+#define CMDQ_EVENT_DISP_GAMMA0_SOF 16
+#define CMDQ_EVENT_DISP_DITHER0_SOF 17
+#define CMDQ_EVENT_DISP_PWM0_SOF 18
+#define CMDQ_EVENT_DISP_DSI0_SOF 19
+#define CMDQ_EVENT_DISP_DPI0_SOF 20
+#define CMDQ_EVENT_DISP_RSZ_SOF 22
+#define CMDQ_EVENT_MDP_AAL_SOF 23
+#define CMDQ_EVENT_MDP_CCORR_SOF 24
+#define CMDQ_EVENT_DISP_DBI_SOF 25
+#define CMDQ_EVENT_DISP_RDMA0_EOF 26
+#define CMDQ_EVENT_DISP_RDMA1_EOF 27
+#define CMDQ_EVENT_MDP_RDMA0_EOF 28
+#define CMDQ_EVENT_MDP_RSZ0_EOF 30
+#define CMDQ_EVENT_MDP_RSZ1_EOF 31
+#define CMDQ_EVENT_MDP_TDSHP_EOF 32
+#define CMDQ_EVENT_MDP_WROT0_EOF 33
+#define CMDQ_EVENT_MDP_WDMA0_EOF 34
+#define CMDQ_EVENT_DISP_OVL0_EOF 35
+#define CMDQ_EVENT_DISP_OVL0_2L_EOF 36
+#define CMDQ_EVENT_DISP_OVL1_2L_EOF 37
+#define CMDQ_EVENT_DISP_WDMA0_EOF 38
+#define CMDQ_EVENT_DISP_COLOR0_EOF 39
+#define CMDQ_EVENT_DISP_CCORR0_EOF 40
+#define CMDQ_EVENT_DISP_AAL0_EOF 41
+#define CMDQ_EVENT_DISP_GAMMA0_EOF 42
+#define CMDQ_EVENT_DISP_DITHER0_EOF 43
+#define CMDQ_EVENT_DSI0_EOF 44
+#define CMDQ_EVENT_DPI0_EOF 45
+#define CMDQ_EVENT_DISP_RSZ_EOF 47
+#define CMDQ_EVENT_MDP_AAL_EOF 48
+#define CMDQ_EVENT_MDP_CCORR_EOF 49
+#define CMDQ_EVENT_DBI_EOF 50
+#define CMDQ_EVENT_MUTEX_STREAM_DONE0 130
+#define CMDQ_EVENT_MUTEX_STREAM_DONE1 131
+#define CMDQ_EVENT_MUTEX_STREAM_DONE2 132
+#define CMDQ_EVENT_MUTEX_STREAM_DONE3 133
+#define CMDQ_EVENT_MUTEX_STREAM_DONE4 134
+#define CMDQ_EVENT_MUTEX_STREAM_DONE5 135
+#define CMDQ_EVENT_MUTEX_STREAM_DONE6 136
+#define CMDQ_EVENT_MUTEX_STREAM_DONE7 137
+#define CMDQ_EVENT_MUTEX_STREAM_DONE8 138
+#define CMDQ_EVENT_MUTEX_STREAM_DONE9 139
+#define CMDQ_EVENT_MUTEX_STREAM_DONE10 140
+#define CMDQ_EVENT_MUTEX_STREAM_DONE11 141
+#define CMDQ_EVENT_DISP_RDMA0_BUF_UNDERRUN_EVEN 142
+#define CMDQ_EVENT_DISP_RDMA1_BUF_UNDERRUN_EVEN 143
+#define CMDQ_EVENT_DSI0_TE_EVENT 144
+#define CMDQ_EVENT_DSI0_IRQ_EVENT 145
+#define CMDQ_EVENT_DSI0_DONE_EVENT 146
+#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE 150
+#define CMDQ_EVENT_MDP_WDMA_SW_RST_DONE 151
+#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE 152
+#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE 154
+#define CMDQ_EVENT_DISP_OVL0_FRAME_RST_DONE_PULE 155
+#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_RST_DONE_ULSE 156
+#define CMDQ_EVENT_DISP_OVL1_2L_FRAME_RST_DONE_ULSE 157
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_0 257
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_1 258
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_2 259
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_3 260
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_4 261
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_5 262
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_6 263
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_7 264
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_8 265
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_9 266
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_10 267
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_11 268
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_12 269
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_13 270
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_14 271
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_15 272
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_16 273
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_17 274
+#define CMDQ_EVENT_ISP_FRAME_DONE_P2_18 275
+#define CMDQ_EVENT_AMD_FRAME_DONE 276
+#define CMDQ_EVENT_DVE_DONE 277
+#define CMDQ_EVENT_WMFE_DONE 278
+#define CMDQ_EVENT_RSC_DONE 279
+#define CMDQ_EVENT_MFB_DONE 280
+#define CMDQ_EVENT_WPE_A_DONE 281
+#define CMDQ_EVENT_SPE_B_DONE 282
+#define CMDQ_EVENT_OCC_DONE 283
+#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 289
+#define CMDQ_EVENT_JPG_ENC_CMDQ_DONE 290
+#define CMDQ_EVENT_JPG_DEC_CMDQ_DONE 291
+#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 292
+#define CMDQ_EVENT_VENC_CMDQ_128BYTE_DONE 293
+#define CMDQ_EVENT_ISP_FRAME_DONE_A 321
+#define CMDQ_EVENT_ISP_FRAME_DONE_B 322
+#define CMDQ_EVENT_CAMSV0_PASS1_DONE 323
+#define CMDQ_EVENT_CAMSV1_PASS1_DONE 324
+#define CMDQ_EVENT_CAMSV2_PASS1_DONE 325
+#define CMDQ_EVENT_TSF_DONE 326
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 327
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 328
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 329
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 330
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 331
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 332
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 333
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 334
+#define CMDQ_EVENT_IPU_CORE0_DONE0 353
+#define CMDQ_EVENT_IPU_CORE0_DONE1 354
+#define CMDQ_EVENT_IPU_CORE0_DONE2 355
+#define CMDQ_EVENT_IPU_CORE0_DONE3 356
+#define CMDQ_EVENT_IPU_CORE1_DONE0 385
+#define CMDQ_EVENT_IPU_CORE1_DONE1 386
+#define CMDQ_EVENT_IPU_CORE1_DONE2 387
+#define CMDQ_EVENT_IPU_CORE1_DONE3 388
+
+#endif
diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h
index 45e11b6170ca..499de6216581 100644
--- a/include/dt-bindings/pinctrl/k3.h
+++ b/include/dt-bindings/pinctrl/k3.h
@@ -32,4 +32,7 @@
#define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
#define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+#define J721E_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+#define J721E_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+
#endif
diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset-controller/mt8183-resets.h
new file mode 100644
index 000000000000..8804e34ebdd4
--- /dev/null
+++ b/include/dt-bindings/reset-controller/mt8183-resets.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Yong Liang <yong.liang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8183
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8183
+
+/* INFRACFG AO resets */
+#define MT8183_INFRACFG_AO_THERM_SW_RST 0
+#define MT8183_INFRACFG_AO_USB_TOP_SW_RST 1
+#define MT8183_INFRACFG_AO_MM_IOMMU_SW_RST 3
+#define MT8183_INFRACFG_AO_MSDC3_SW_RST 4
+#define MT8183_INFRACFG_AO_MSDC2_SW_RST 5
+#define MT8183_INFRACFG_AO_MSDC1_SW_RST 6
+#define MT8183_INFRACFG_AO_MSDC0_SW_RST 7
+#define MT8183_INFRACFG_AO_APDMA_SW_RST 9
+#define MT8183_INFRACFG_AO_MIMP_D_SW_RST 10
+#define MT8183_INFRACFG_AO_BTIF_SW_RST 12
+#define MT8183_INFRACFG_AO_DISP_PWM_SW_RST 14
+#define MT8183_INFRACFG_AO_AUXADC_SW_RST 15
+
+#define MT8183_INFRACFG_AO_IRTX_SW_RST 32
+#define MT8183_INFRACFG_AO_SPI0_SW_RST 33
+#define MT8183_INFRACFG_AO_I2C0_SW_RST 34
+#define MT8183_INFRACFG_AO_I2C1_SW_RST 35
+#define MT8183_INFRACFG_AO_I2C2_SW_RST 36
+#define MT8183_INFRACFG_AO_I2C3_SW_RST 37
+#define MT8183_INFRACFG_AO_UART0_SW_RST 38
+#define MT8183_INFRACFG_AO_UART1_SW_RST 39
+#define MT8183_INFRACFG_AO_UART2_SW_RST 40
+#define MT8183_INFRACFG_AO_PWM_SW_RST 41
+#define MT8183_INFRACFG_AO_SPI1_SW_RST 42
+#define MT8183_INFRACFG_AO_I2C4_SW_RST 43
+#define MT8183_INFRACFG_AO_DVFSP_SW_RST 44
+#define MT8183_INFRACFG_AO_SPI2_SW_RST 45
+#define MT8183_INFRACFG_AO_SPI3_SW_RST 46
+#define MT8183_INFRACFG_AO_UFSHCI_SW_RST 47
+
+#define MT8183_INFRACFG_AO_PMIC_WRAP_SW_RST 64
+#define MT8183_INFRACFG_AO_SPM_SW_RST 65
+#define MT8183_INFRACFG_AO_USBSIF_SW_RST 66
+#define MT8183_INFRACFG_AO_KP_SW_RST 68
+#define MT8183_INFRACFG_AO_APXGPT_SW_RST 69
+#define MT8183_INFRACFG_AO_CLDMA_AO_SW_RST 70
+#define MT8183_INFRACFG_AO_UNIPRO_UFS_SW_RST 71
+#define MT8183_INFRACFG_AO_DX_CC_SW_RST 72
+#define MT8183_INFRACFG_AO_UFSPHY_SW_RST 73
+
+#define MT8183_INFRACFG_AO_DX_CC_SEC_SW_RST 96
+#define MT8183_INFRACFG_AO_GCE_SW_RST 97
+#define MT8183_INFRACFG_AO_CLDMA_SW_RST 98
+#define MT8183_INFRACFG_AO_TRNG_SW_RST 99
+#define MT8183_INFRACFG_AO_AP_MD_CCIF_1_SW_RST 103
+#define MT8183_INFRACFG_AO_AP_MD_CCIF_SW_RST 104
+#define MT8183_INFRACFG_AO_I2C1_IMM_SW_RST 105
+#define MT8183_INFRACFG_AO_I2C1_ARB_SW_RST 106
+#define MT8183_INFRACFG_AO_I2C2_IMM_SW_RST 107
+#define MT8183_INFRACFG_AO_I2C2_ARB_SW_RST 108
+#define MT8183_INFRACFG_AO_I2C5_SW_RST 109
+#define MT8183_INFRACFG_AO_I2C5_IMM_SW_RST 110
+#define MT8183_INFRACFG_AO_I2C5_ARB_SW_RST 111
+#define MT8183_INFRACFG_AO_SPI4_SW_RST 112
+#define MT8183_INFRACFG_AO_SPI5_SW_RST 113
+#define MT8183_INFRACFG_AO_INFRA2MFGAXI_CBIP_CLAS_SW_RST 114
+#define MT8183_INFRACFG_AO_MFGAXI2INFRA_M0_CBIP_GLAS_OUT_SW_RST 115
+#define MT8183_INFRACFG_AO_MFGAXI2INFRA_M1_CBIP_GLAS_OUT_SW_RST 116
+#define MT8183_INFRACFG_AO_UFS_AES_SW_RST 117
+#define MT8183_INFRACFG_AO_CCU_I2C_IRQ_SW_RST 118
+#define MT8183_INFRACFG_AO_CCU_I2C_DMA_SW_RST 119
+#define MT8183_INFRACFG_AO_I2C6_SW_RST 120
+#define MT8183_INFRACFG_AO_CCU_GALS_SW_RST 121
+#define MT8183_INFRACFG_AO_IPU_GALS_SW_RST 122
+#define MT8183_INFRACFG_AO_CONN2AP_GALS_SW_RST 123
+#define MT8183_INFRACFG_AO_AP_MD_CCIF2_SW_RST 124
+#define MT8183_INFRACFG_AO_AP_MD_CCIF3_SW_RST 125
+#define MT8183_INFRACFG_AO_I2C7_SW_RST 126
+#define MT8183_INFRACFG_AO_I2C8_SW_RST 127
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8183 */
diff --git a/include/dt-bindings/reset/hisi,hi6220-resets.h b/include/dt-bindings/reset/hisi,hi6220-resets.h
index e7c362a81a97..63aff7d8aa45 100644
--- a/include/dt-bindings/reset/hisi,hi6220-resets.h
+++ b/include/dt-bindings/reset/hisi,hi6220-resets.h
@@ -73,4 +73,11 @@
#define MEDIA_MMU 6
#define MEDIA_XG2RAM1 7
+#define AO_G3D 1
+#define AO_CODECISP 2
+#define AO_MCPU 4
+#define AO_BBPHARQMEM 5
+#define AO_HIFI 8
+#define AO_ACPUSCUL2C 12
+
#endif /*_DT_BINDINGS_RESET_CONTROLLER_HI6220*/
diff --git a/include/dt-bindings/reset/sun8i-v3s-ccu.h b/include/dt-bindings/reset/sun8i-v3s-ccu.h
index b58ef21a2e18..b6790173afd6 100644
--- a/include/dt-bindings/reset/sun8i-v3s-ccu.h
+++ b/include/dt-bindings/reset/sun8i-v3s-ccu.h
@@ -75,4 +75,7 @@
#define RST_BUS_UART1 50
#define RST_BUS_UART2 51
+/* Reset lines not available on V3s */
+#define RST_BUS_I2S0 52
+
#endif /* _DT_BINDINGS_RST_SUN8I_H3_H_ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 978cc239f23b..8b4e516bac00 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -643,6 +643,12 @@ bool acpi_gtdt_c3stop(int type);
int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count);
#endif
+#ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER
+static inline void acpi_arch_set_root_pointer(u64 addr)
+{
+}
+#endif
+
#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER
static inline u64 acpi_arch_get_root_pointer(void)
{
diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h
index 516a6fda83c5..421b0fa90d6a 100644
--- a/include/linux/amba/clcd-regs.h
+++ b/include/linux/amba/clcd-regs.h
@@ -42,6 +42,7 @@
#define TIM2_PCD_LO_MASK GENMASK(4, 0)
#define TIM2_PCD_LO_BITS 5
#define TIM2_CLKSEL (1 << 5)
+#define TIM2_ACB_MASK GENMASK(10, 6)
#define TIM2_IVS (1 << 11)
#define TIM2_IHS (1 << 12)
#define TIM2_IPC (1 << 13)
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 0b5897446dca..c7d6b2e8c3b5 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -46,6 +46,12 @@ enum backlight_notification {
BACKLIGHT_UNREGISTERED,
};
+enum backlight_scale {
+ BACKLIGHT_SCALE_UNKNOWN = 0,
+ BACKLIGHT_SCALE_LINEAR,
+ BACKLIGHT_SCALE_NON_LINEAR,
+};
+
struct backlight_device;
struct fb_info;
@@ -80,6 +86,8 @@ struct backlight_properties {
enum backlight_type type;
/* Flags used to signal drivers of state changes */
unsigned int state;
+ /* Type of the brightness scale (linear, non-linear, ...) */
+ enum backlight_scale scale;
#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */
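Note on the backlight change above: the new scale field lets a driver tell userspace whether its brightness steps are perceptually linear. A minimal sketch of how a driver might populate it (kernel context assumed; the mydrv names and the 255-step range are illustrative, not part of this patch):

#include <linux/backlight.h>

static struct backlight_device *mydrv_register_bl(struct device *dev,
						  void *drvdata,
						  const struct backlight_ops *ops)
{
	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.max_brightness = 255,
		.scale = BACKLIGHT_SCALE_LINEAR,	/* new field in this patch */
	};

	return devm_backlight_device_register(dev, "mydrv-backlight", dev,
					      drvdata, ops, &props);
}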
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3094f2d513b2..f3ea78b0c91c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1110,6 +1110,8 @@ extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern void blk_queue_required_elevator_features(struct request_queue *q,
unsigned int features);
+extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
+ struct device *dev);
/*
* Number of physical segments as sent to the device.
@@ -1522,10 +1524,14 @@ struct blk_integrity_iter {
};
typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
+typedef void (integrity_prepare_fn) (struct request *);
+typedef void (integrity_complete_fn) (struct request *, unsigned int);
struct blk_integrity_profile {
integrity_processing_fn *generate_fn;
integrity_processing_fn *verify_fn;
+ integrity_prepare_fn *prepare_fn;
+ integrity_complete_fn *complete_fn;
const char *name;
};
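The two new blk_integrity hooks let a profile touch protection information right before a request is issued and again when it completes; the in-tree motivation appears to be moving T10 PI reference-tag remapping into the block layer. A sketch of a profile wiring them up, with all my_pi_* names hypothetical:

static blk_status_t my_pi_generate(struct blk_integrity_iter *iter);
static blk_status_t my_pi_verify(struct blk_integrity_iter *iter);
static void my_pi_prepare(struct request *rq);
static void my_pi_complete(struct request *rq, unsigned int nr_bytes);

static const struct blk_integrity_profile my_pi_profile = {
	.name		= "MYDRV-PI",
	.generate_fn	= my_pi_generate,
	.verify_fn	= my_pi_verify,
	.prepare_fn	= my_pi_prepare,	/* called before dispatch */
	.complete_fn	= my_pi_complete,	/* called on completion, with byte count */
};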
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 82156da3c650..b9dbda1c26aa 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -293,6 +293,7 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private);
struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
+extern void ceph_reset_client_addr(struct ceph_client *client);
extern int __ceph_open_session(struct ceph_client *client,
unsigned long started);
extern int ceph_open_session(struct ceph_client *client);
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 23895d178149..c4458dc6a757 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -337,6 +337,7 @@ extern void ceph_msgr_flush(void);
extern void ceph_messenger_init(struct ceph_messenger *msgr,
struct ceph_entity_addr *myaddr);
extern void ceph_messenger_fini(struct ceph_messenger *msgr);
+extern void ceph_messenger_reset_nonce(struct ceph_messenger *msgr);
extern void ceph_con_init(struct ceph_connection *con, void *private,
const struct ceph_connection_operations *ops,
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index b4d134d3312a..dbb8a6959a73 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -109,6 +109,7 @@ extern int ceph_monmap_contains(struct ceph_monmap *m,
extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
extern void ceph_monc_stop(struct ceph_mon_client *monc);
+extern void ceph_monc_reopen_session(struct ceph_mon_client *monc);
enum {
CEPH_SUB_MONMAP = 0,
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index ad7fe5d10dcd..eaffbdddf89a 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -381,6 +381,7 @@ extern void ceph_osdc_cleanup(void);
extern int ceph_osdc_init(struct ceph_osd_client *osdc,
struct ceph_client *client);
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
+extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc);
extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
@@ -388,6 +389,7 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err);
+void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc);
#define osd_req_op_data(oreq, whch, typ, fld) \
({ \
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index dce5521a9bf6..2fdfe8061363 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -299,7 +299,8 @@ struct clk_init_data {
* into the clk API
*
* @init: pointer to struct clk_init_data that contains the init data shared
- * with the common clock framework.
+ * with the common clock framework. This pointer will be set to NULL once
+ * a clk_register() variant is called on this clk_hw pointer.
*/
struct clk_hw {
struct clk_core *core;
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 853a8f181394..18b7b95a8253 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -239,7 +239,8 @@ static inline int clk_prepare(struct clk *clk)
return 0;
}
-static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks)
+static inline int __must_check
+clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
might_sleep();
return 0;
@@ -263,7 +264,8 @@ static inline void clk_unprepare(struct clk *clk)
{
might_sleep();
}
-static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks)
+static inline void clk_bulk_unprepare(int num_clks,
+ const struct clk_bulk_data *clks)
{
might_sleep();
}
@@ -820,7 +822,8 @@ static inline int clk_enable(struct clk *clk)
return 0;
}
-static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks)
+static inline int __must_check clk_bulk_enable(int num_clks,
+ const struct clk_bulk_data *clks)
{
return 0;
}
@@ -829,7 +832,7 @@ static inline void clk_disable(struct clk *clk) {}
static inline void clk_bulk_disable(int num_clks,
- struct clk_bulk_data *clks) {}
+ const struct clk_bulk_data *clks) {}
static inline unsigned long clk_get_rate(struct clk *clk)
{
@@ -918,8 +921,8 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
-static inline int __must_check clk_bulk_prepare_enable(int num_clks,
- struct clk_bulk_data *clks)
+static inline int __must_check
+clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
{
int ret;
@@ -934,7 +937,7 @@ static inline int __must_check clk_bulk_prepare_enable(int num_clks,
}
static inline void clk_bulk_disable_unprepare(int num_clks,
- struct clk_bulk_data *clks)
+ const struct clk_bulk_data *clks)
{
clk_bulk_disable(num_clks, clks);
clk_bulk_unprepare(num_clks, clks);
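Constifying the clk_bulk_data pointers in these stubs documents that the prepare/enable paths only read the table a driver passes in. A short usage sketch with hypothetical clock names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

static struct clk_bulk_data mydrv_clks[] = {
	{ .id = "bus" },
	{ .id = "core" },
};

static int mydrv_clks_on(struct device *dev)
{
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(mydrv_clks), mydrv_clks);
	if (ret)
		return ret;

	/* accepts a const struct clk_bulk_data * after this change */
	return clk_bulk_prepare_enable(ARRAY_SIZE(mydrv_clks), mydrv_clks);
}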
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index 85f8cf9d1226..eae9652c70cd 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -4,6 +4,9 @@
* Sylwester Nawrocki <s.nawrocki@samsung.com>
*/
+#ifndef __CLK_CONF_H
+#define __CLK_CONF_H
+
#include <linux/types.h>
struct device_node;
@@ -17,3 +20,5 @@ static inline int of_clk_set_defaults(struct device_node *node,
return 0;
}
#endif
+
+#endif /* __CLK_CONF_H */
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 9569e7c786d3..4b898cdbdf05 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -129,11 +129,8 @@ static inline bool compaction_failed(enum compact_result result)
return false;
}
-/*
- * Compaction has backed off for some reason. It might be throttling or
- * lock contention. Retrying is still worthwhile.
- */
-static inline bool compaction_withdrawn(enum compact_result result)
+/* Compaction needs reclaim to be performed first, so it can continue. */
+static inline bool compaction_needs_reclaim(enum compact_result result)
{
/*
* Compaction backed off due to watermark checks for order-0
@@ -142,6 +139,16 @@ static inline bool compaction_withdrawn(enum compact_result result)
if (result == COMPACT_SKIPPED)
return true;
+ return false;
+}
+
+/*
+ * Compaction has backed off for some reason after doing some work or none
+ * at all. It might be throttling or lock contention. Retrying might be still
+ * worthwhile, but with a higher priority if allowed.
+ */
+static inline bool compaction_withdrawn(enum compact_result result)
+{
/*
* If compaction is deferred for high-order allocations, it is
* because sync compaction recently failed. If this is the case
@@ -207,6 +214,11 @@ static inline bool compaction_failed(enum compact_result result)
return false;
}
+static inline bool compaction_needs_reclaim(enum compact_result result)
+{
+ return false;
+}
+
static inline bool compaction_withdrawn(enum compact_result result)
{
return true;
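Splitting compaction_needs_reclaim() out of compaction_withdrawn() lets a caller tell "compaction was skipped because there are not enough free base pages" apart from a generic back-off. A hypothetical retry-policy sketch (not the actual page allocator logic):

static bool should_retry_compaction(enum compact_result result)
{
	if (compaction_needs_reclaim(result))
		return true;	/* reclaim some pages first, then try again */

	if (compaction_withdrawn(result))
		return true;	/* back-off case: retry, perhaps at higher priority */

	return !compaction_failed(result);
}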
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 599c27b56c29..72393a8c1a6c 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -130,10 +130,6 @@ struct ftrace_likely_data {
/*
* Force always-inline if the user requests it so via the .config.
- * GCC does not warn about unused static inline functions for
- * -Wunused-function. This turns out to avoid the need for complex #ifdef
- * directives. Suppress the warning in clang as well by using "unused"
- * function attribute, which is redundant but not harmful for gcc.
* Prefer gnu_inline, so that extern inline functions do not emit an
* externally visible function. This makes extern inline behave as per gnu89
* semantics rather than c99. This prevents multiple symbol definition errors
@@ -144,14 +140,35 @@ struct ftrace_likely_data {
*/
#if !defined(CONFIG_OPTIMIZE_INLINING)
#define inline inline __attribute__((__always_inline__)) __gnu_inline \
- __maybe_unused notrace
+ __inline_maybe_unused notrace
#else
#define inline inline __gnu_inline \
- __maybe_unused notrace
+ __inline_maybe_unused notrace
#endif
+/*
+ * gcc provides both __inline__ and __inline as alternate spellings of
+ * the inline keyword, though the latter is undocumented. New kernel
+ * code should only use the inline spelling, but some existing code
+ * uses __inline__. Since we #define inline above, to ensure
+ * __inline__ has the same semantics, we need this #define.
+ *
+ * However, the spelling __inline is strictly reserved for referring
+ * to the bare keyword.
+ */
#define __inline__ inline
-#define __inline inline
+
+/*
+ * GCC does not warn about unused static inline functions for -Wunused-function.
+ * Suppress the warning in clang as well by using __maybe_unused, but enable it
+ * for W=1 build. This will allow clang to find unused functions. Remove the
+ * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings.
+ */
+#ifdef KBUILD_EXTRA_WARN1
+#define __inline_maybe_unused
+#else
+#define __inline_maybe_unused __maybe_unused
+#endif
/*
 * Rather than using noinline to prevent stack consumption, use
@@ -189,6 +206,12 @@ struct ftrace_likely_data {
#define asm_volatile_goto(x...) asm goto(x)
#endif
+#ifdef CONFIG_CC_HAS_ASM_INLINE
+#define asm_inline asm __inline
+#else
+#define asm_inline asm
+#endif
+
#ifndef __no_fgcse
# define __no_fgcse
#endif
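asm_inline asks compilers that understand the asm inline qualifier (e.g. gcc 9 and later) to treat the asm body as minimal-size for inlining decisions, and degrades to a plain asm statement otherwise. A sketch of how a user would spell it; the nop body is just a placeholder:

static inline void mydrv_cpu_hint(void)
{
	/* expands to "asm __inline volatile" or plain "asm volatile" */
	asm_inline volatile("nop");
}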
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 88dc0c653925..d0633ebdaa9c 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -201,12 +201,14 @@ enum cpuhp_smt_control {
extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology(void);
+extern bool cpu_smt_possible(void);
extern int cpuhp_smt_enable(void);
extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
#else
# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology(void) { }
+static inline bool cpu_smt_possible(void) { return false; }
static inline int cpuhp_smt_enable(void) { return 0; }
static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
#endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b5a5a1ed9efd..78a73eba64dd 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -200,8 +200,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_wrap(cpu, mask, start) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
-#define for_each_cpu_and(cpu, mask, and) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+#define for_each_cpu_and(cpu, mask1, mask2) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
#else
/**
* cpumask_first - get the first cpu in a cpumask
@@ -290,20 +290,20 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
/**
* for_each_cpu_and - iterate over every cpu in both masks
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the first cpumask pointer
- * @and: the second cpumask pointer
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
*
* This saves a temporary CPU mask in many places. It is equivalent to:
* struct cpumask tmp;
- * cpumask_and(&tmp, &mask, &and);
+ * cpumask_and(&tmp, &mask1, &mask2);
* for_each_cpu(cpu, &tmp)
* ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_and(cpu, mask, and) \
+#define for_each_cpu_and(cpu, mask1, mask2) \
for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask), (and)), \
+ (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
(cpu) < nr_cpu_ids;)
#endif /* SMP */
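Renaming the parameters to mask1/mask2 makes it clearer that the macro simply walks the intersection of two arbitrary cpumasks. A minimal sketch of a caller (count_usable_cpus() and the passed-in mask are illustrative):

#include <linux/cpumask.h>

static unsigned int count_usable_cpus(const struct cpumask *allowed)
{
	unsigned int cpu, n = 0;

	for_each_cpu_and(cpu, cpu_online_mask, allowed)
		n++;

	return n;
}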
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index f774c5eb9e3c..4664fc1871de 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
return -EOPNOTSUPP;
}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+
+#ifdef CONFIG_PROC_VMCORE
+ssize_t read_from_oldmem(char *buf, size_t count,
+ u64 *ppos, int userbuf,
+ bool encrypted);
+#else
+static inline ssize_t read_from_oldmem(char *buf, size_t count,
+ u64 *ppos, int userbuf,
+ bool encrypted)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE */
+
#endif /* LINUX_CRASHDUMP_H */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index f7a30e0099be..18639c069263 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -386,7 +386,6 @@ static inline void put_cred(const struct cred *_cred)
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
-#define current_security() (current_cred_xxx(security))
extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index bae060fae862..ec212cb27fdc 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -306,7 +306,7 @@ struct dma_buf {
struct module *owner;
struct list_head list_node;
void *priv;
- struct reservation_object *resv;
+ struct dma_resv *resv;
/* poll support */
wait_queue_head_t poll;
@@ -365,7 +365,7 @@ struct dma_buf_export_info {
const struct dma_buf_ops *ops;
size_t size;
int flags;
- struct reservation_object *resv;
+ struct dma_resv *resv;
void *priv;
};
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 05d29dbc7e62..3347c54f3a87 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -63,15 +63,35 @@ struct dma_fence_cb;
* been completed, or never called at all.
*/
struct dma_fence {
- struct kref refcount;
- const struct dma_fence_ops *ops;
- struct rcu_head rcu;
- struct list_head cb_list;
spinlock_t *lock;
+ const struct dma_fence_ops *ops;
+ /*
+ * We clear the callback list on kref_put so that by the time we
+ * release the fence it is unused. No one should be adding to the
+ * cb_list that they don't themselves hold a reference for.
+ *
+ * The lifetime of the timestamp is similarly tied to both the
+ * rcu freelist and the cb_list. The timestamp is only set upon
+ * signaling while simultaneously notifying the cb_list. Ergo, we
+ * only use either the cb_list or the timestamp. Upon destruction,
+ * neither are accessible, and so we can use the rcu. This means
+ * that the cb_list is *only* valid until the signal bit is set,
+ * and to read either you *must* hold a reference to the fence,
+ * and not just the rcu_read_lock.
+ *
+ * Listed in chronological order.
+ */
+ union {
+ struct list_head cb_list;
+ /* @cb_list replaced by @timestamp on dma_fence_signal() */
+ ktime_t timestamp;
+ /* @timestamp replaced by @rcu on dma_fence_release() */
+ struct rcu_head rcu;
+ };
u64 context;
u64 seqno;
unsigned long flags;
- ktime_t timestamp;
+ struct kref refcount;
int error;
};
@@ -273,7 +293,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
}
/**
- * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * dma_fence_get_rcu - get a fence from a dma_resv_list with
* rcu read lock
* @fence: fence to increase refcount of
*
@@ -297,7 +317,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
* so long as the caller is using RCU on the pointer to the fence.
*
* An alternative mechanism is to employ a seqlock to protect a bunch of
- * fences, such as used by struct reservation_object. When using a seqlock,
+ * fences, such as used by struct dma_resv. When using a seqlock,
* the seqlock must be taken before and checked after a reference to the
* fence is acquired (as shown here).
*
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 14702e2d6fa8..4a1c4fca475a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -131,6 +131,7 @@ struct dma_map_ops {
int (*dma_supported)(struct device *dev, u64 mask);
u64 (*get_required_mask)(struct device *dev);
size_t (*max_mapping_size)(struct device *dev);
+ unsigned long (*get_merge_boundary)(struct device *dev);
};
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
@@ -457,11 +458,13 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
+bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
+unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
struct page *page, size_t offset, size_t size,
@@ -547,6 +550,10 @@ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
{
return -ENXIO;
}
+static inline bool dma_can_mmap(struct device *dev)
+{
+ return false;
+}
static inline int dma_supported(struct device *dev, u64 mask)
{
return 0;
@@ -567,6 +574,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
}
+static inline unsigned long dma_get_merge_boundary(struct device *dev)
+{
+ return 0;
+}
#endif /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
@@ -610,16 +621,14 @@ extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
+struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
- unsigned long vm_flags,
pgprot_t prot, const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size,
- unsigned long vm_flags, pgprot_t prot,
- const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+ pgprot_t prot, const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
-int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);
@@ -749,7 +758,6 @@ static inline int dma_get_cache_alignment(void)
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size);
-void dma_release_declared_memory(struct device *dev);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
@@ -757,11 +765,6 @@ dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
{
return -ENOSYS;
}
-
-static inline void
-dma_release_declared_memory(struct device *dev)
-{
-}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
@@ -781,9 +784,6 @@ static inline void *dma_alloc_wc(struct device *dev, size_t size,
return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
-#ifndef dma_alloc_writecombine
-#define dma_alloc_writecombine dma_alloc_wc
-#endif
static inline void dma_free_wc(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr)
@@ -791,9 +791,6 @@ static inline void dma_free_wc(struct device *dev, size_t size,
return dma_free_attrs(dev, size, cpu_addr, dma_addr,
DMA_ATTR_WRITE_COMBINE);
}
-#ifndef dma_free_writecombine
-#define dma_free_writecombine dma_free_wc
-#endif
static inline int dma_mmap_wc(struct device *dev,
struct vm_area_struct *vma,
@@ -803,9 +800,6 @@ static inline int dma_mmap_wc(struct device *dev,
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
DMA_ATTR_WRITE_COMBINE);
}
-#ifndef dma_mmap_writecombine
-#define dma_mmap_writecombine dma_mmap_wc
-#endif
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
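dma_can_mmap() and dma_get_merge_boundary() give drivers a runtime query for DMA-layer capabilities, with safe fallbacks (false and 0) when CONFIG_HAS_DMA is disabled, so no #ifdefs are needed in callers. A probe-time sketch; mydrv_query_dma() is hypothetical:

#include <linux/dma-mapping.h>

static void mydrv_query_dma(struct device *dev)
{
	if (dma_can_mmap(dev))
		dev_info(dev, "DMA buffers can be mmap()ed to userspace\n");

	dev_info(dev, "DMA address merge boundary: %#lx\n",
		 dma_get_merge_boundary(dev));
}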
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 0bff3d7fac92..dd3de6d88fc0 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -3,6 +3,7 @@
#define _LINUX_DMA_NONCOHERENT_H 1
#include <linux/dma-mapping.h>
+#include <asm/pgtable.h>
#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
@@ -42,10 +43,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
dma_addr_t dma_addr);
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
- unsigned long attrs);
#ifdef CONFIG_MMU
+/*
+ * Page protection so that devices that can't snoop CPU caches can use the
+ * memory coherently. We default to pgprot_noncached which is usually used
+ * for ioremap as a safe bet, but architectures can override this with less
+ * strict semantics if possible.
+ */
+#ifndef pgprot_dmacoherent
+#define pgprot_dmacoherent(prot) pgprot_noncached(prot)
+#endif
+
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
diff --git a/include/linux/reservation.h b/include/linux/dma-resv.h
index 644a22dbe53b..ee50d10f052b 100644
--- a/include/linux/reservation.h
+++ b/include/linux/dma-resv.h
@@ -50,98 +50,52 @@ extern struct lock_class_key reservation_seqcount_class;
extern const char reservation_seqcount_string[];
/**
- * struct reservation_object_list - a list of shared fences
+ * struct dma_resv_list - a list of shared fences
* @rcu: for internal use
 * @shared_count: number of fences in the shared table
* @shared_max: for growing shared fence table
* @shared: shared fence table
*/
-struct reservation_object_list {
+struct dma_resv_list {
struct rcu_head rcu;
u32 shared_count, shared_max;
struct dma_fence __rcu *shared[];
};
/**
- * struct reservation_object - a reservation object manages fences for a buffer
+ * struct dma_resv - a reservation object manages fences for a buffer
* @lock: update side lock
* @seq: sequence count for managing RCU read-side synchronization
* @fence_excl: the exclusive fence, if there is one currently
* @fence: list of current shared fences
*/
-struct reservation_object {
+struct dma_resv {
struct ww_mutex lock;
seqcount_t seq;
struct dma_fence __rcu *fence_excl;
- struct reservation_object_list __rcu *fence;
+ struct dma_resv_list __rcu *fence;
};
-#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define reservation_object_assert_held(obj) \
- lockdep_assert_held(&(obj)->lock.base)
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
/**
- * reservation_object_init - initialize a reservation object
- * @obj: the reservation object
- */
-static inline void
-reservation_object_init(struct reservation_object *obj)
-{
- ww_mutex_init(&obj->lock, &reservation_ww_class);
-
- __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
- RCU_INIT_POINTER(obj->fence, NULL);
- RCU_INIT_POINTER(obj->fence_excl, NULL);
-}
-
-/**
- * reservation_object_fini - destroys a reservation object
- * @obj: the reservation object
- */
-static inline void
-reservation_object_fini(struct reservation_object *obj)
-{
- int i;
- struct reservation_object_list *fobj;
- struct dma_fence *excl;
-
- /*
- * This object should be dead and all references must have
- * been released to it, so no need to be protected with rcu.
- */
- excl = rcu_dereference_protected(obj->fence_excl, 1);
- if (excl)
- dma_fence_put(excl);
-
- fobj = rcu_dereference_protected(obj->fence, 1);
- if (fobj) {
- for (i = 0; i < fobj->shared_count; ++i)
- dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));
-
- kfree(fobj);
- }
-
- ww_mutex_destroy(&obj->lock);
-}
-
-/**
- * reservation_object_get_list - get the reservation object's
+ * dma_resv_get_list - get the reservation object's
* shared fence list, with update-side lock held
* @obj: the reservation object
*
* Returns the shared fence list. Does NOT take references to
* the fence. The obj->lock must be held.
*/
-static inline struct reservation_object_list *
-reservation_object_get_list(struct reservation_object *obj)
+static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
{
return rcu_dereference_protected(obj->fence,
- reservation_object_held(obj));
+ dma_resv_held(obj));
}
/**
- * reservation_object_lock - lock the reservation object
+ * dma_resv_lock - lock the reservation object
* @obj: the reservation object
* @ctx: the locking context
*
@@ -155,15 +109,14 @@ reservation_object_get_list(struct reservation_object *obj)
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
*/
-static inline int
-reservation_object_lock(struct reservation_object *obj,
- struct ww_acquire_ctx *ctx)
+static inline int dma_resv_lock(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
{
return ww_mutex_lock(&obj->lock, ctx);
}
/**
- * reservation_object_lock_interruptible - lock the reservation object
+ * dma_resv_lock_interruptible - lock the reservation object
* @obj: the reservation object
* @ctx: the locking context
*
@@ -177,16 +130,45 @@ reservation_object_lock(struct reservation_object *obj,
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
*/
-static inline int
-reservation_object_lock_interruptible(struct reservation_object *obj,
- struct ww_acquire_ctx *ctx)
+static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
{
return ww_mutex_lock_interruptible(&obj->lock, ctx);
}
+/**
+ * dma_resv_lock_slow - slowpath lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object after a die case. This function
+ * will sleep until the lock becomes available. See dma_resv_lock() as
+ * well.
+ */
+static inline void dma_resv_lock_slow(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ ww_mutex_lock_slow(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_slow_interruptible - slowpath lock the reservation
+ * object, interruptible
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object interruptible after a die case. This function
+ * will sleep until the lock becomes available. See
+ * dma_resv_lock_interruptible() as well.
+ */
+static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
+}
/**
- * reservation_object_trylock - trylock the reservation object
+ * dma_resv_trylock - trylock the reservation object
* @obj: the reservation object
*
* Tries to lock the reservation object for exclusive access and modification.
@@ -199,26 +181,46 @@ reservation_object_lock_interruptible(struct reservation_object *obj,
*
* Returns true if the lock was acquired, false otherwise.
*/
-static inline bool __must_check
-reservation_object_trylock(struct reservation_object *obj)
+static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
return ww_mutex_trylock(&obj->lock);
}
/**
- * reservation_object_unlock - unlock the reservation object
+ * dma_resv_is_locked - is the reservation object locked
+ * @obj: the reservation object
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool dma_resv_is_locked(struct dma_resv *obj)
+{
+ return ww_mutex_is_locked(&obj->lock);
+}
+
+/**
+ * dma_resv_locking_ctx - returns the context used to lock the object
+ * @obj: the reservation object
+ *
+ * Returns the context used to lock a reservation object or NULL if no context
+ * was used or the object is not locked at all.
+ */
+static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
+{
+ return READ_ONCE(obj->lock.ctx);
+}
+
+/**
+ * dma_resv_unlock - unlock the reservation object
* @obj: the reservation object
*
* Unlocks the reservation object following exclusive access.
*/
-static inline void
-reservation_object_unlock(struct reservation_object *obj)
+static inline void dma_resv_unlock(struct dma_resv *obj)
{
#ifdef CONFIG_DEBUG_MUTEXES
/* Test shared fence slot reservation */
if (rcu_access_pointer(obj->fence)) {
- struct reservation_object_list *fence =
- reservation_object_get_list(obj);
+ struct dma_resv_list *fence = dma_resv_get_list(obj);
fence->shared_max = fence->shared_count;
}
@@ -227,7 +229,7 @@ reservation_object_unlock(struct reservation_object *obj)
}
/**
- * reservation_object_get_excl - get the reservation object's
+ * dma_resv_get_excl - get the reservation object's
* exclusive fence, with update-side lock held
* @obj: the reservation object
*
@@ -239,14 +241,14 @@ reservation_object_unlock(struct reservation_object *obj)
* The exclusive fence or NULL
*/
static inline struct dma_fence *
-reservation_object_get_excl(struct reservation_object *obj)
+dma_resv_get_excl(struct dma_resv *obj)
{
return rcu_dereference_protected(obj->fence_excl,
- reservation_object_held(obj));
+ dma_resv_held(obj));
}
/**
- * reservation_object_get_excl_rcu - get the reservation object's
+ * dma_resv_get_excl_rcu - get the reservation object's
* exclusive fence, without lock held.
* @obj: the reservation object
*
@@ -257,7 +259,7 @@ reservation_object_get_excl(struct reservation_object *obj)
* The exclusive fence or NULL if none
*/
static inline struct dma_fence *
-reservation_object_get_excl_rcu(struct reservation_object *obj)
+dma_resv_get_excl_rcu(struct dma_resv *obj)
{
struct dma_fence *fence;
@@ -271,27 +273,23 @@ reservation_object_get_excl_rcu(struct reservation_object *obj)
return fence;
}
-int reservation_object_reserve_shared(struct reservation_object *obj,
- unsigned int num_fences);
-void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct dma_fence *fence);
+void dma_resv_init(struct dma_resv *obj);
+void dma_resv_fini(struct dma_resv *obj);
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct dma_fence *fence);
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared);
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+ struct dma_fence **pfence_excl,
+ unsigned *pshared_count,
+ struct dma_fence ***pshared);
-int reservation_object_copy_fences(struct reservation_object *dst,
- struct reservation_object *src);
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
- bool wait_all, bool intr,
- unsigned long timeout);
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
+ unsigned long timeout);
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
- bool test_all);
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
#endif /* _LINUX_RESERVATION_H */
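Beyond the rename from reservation_object to dma_resv, the init/fini helpers move out of line and a few query helpers (dma_resv_is_locked(), dma_resv_locking_ctx(), the slowpath lock variants) are added. A minimal sketch of the renamed locking API for a single object; publish_excl_fence() is hypothetical:

#include <linux/dma-resv.h>

static int publish_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	int ret;

	/* NULL context: locking a single reservation object by itself */
	ret = dma_resv_lock(obj, NULL);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(obj, fence);
	dma_resv_unlock(obj);

	return 0;
}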
diff --git a/include/linux/export.h b/include/linux/export.h
index fd8711ed9ac4..95f55b7f83a0 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -18,9 +18,8 @@ extern struct module __this_module;
#define THIS_MODULE ((struct module *)0)
#endif
-#ifdef CONFIG_MODULES
+#define NS_SEPARATOR "."
-#if defined(__KERNEL__) && !defined(__GENKSYMS__)
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
* generate a checksums for some symbols */
@@ -29,13 +28,13 @@ extern struct module __this_module;
asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
" .weak __crc_" #sym " \n" \
" .long __crc_" #sym " - . \n" \
- " .previous \n");
+ " .previous \n")
#else
#define __CRC_SYMBOL(sym, sec) \
asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
" .weak __crc_" #sym " \n" \
" .long __crc_" #sym " \n" \
- " .previous \n");
+ " .previous \n")
#endif
#else
#define __CRC_SYMBOL(sym, sec)
@@ -49,47 +48,89 @@ extern struct module __this_module;
* absolute relocations that require runtime processing on relocatable
* kernels.
*/
+#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \
+ __ADDRESSABLE(sym) \
+ asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
+ " .balign 4 \n" \
+ "__ksymtab_" #sym NS_SEPARATOR #ns ": \n" \
+ " .long " #sym "- . \n" \
+ " .long __kstrtab_" #sym "- . \n" \
+ " .long __kstrtab_ns_" #sym "- . \n" \
+ " .previous \n")
+
#define __KSYMTAB_ENTRY(sym, sec) \
__ADDRESSABLE(sym) \
asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
- " .balign 8 \n" \
+ " .balign 4 \n" \
"__ksymtab_" #sym ": \n" \
" .long " #sym "- . \n" \
" .long __kstrtab_" #sym "- . \n" \
+ " .long 0 \n" \
" .previous \n")
struct kernel_symbol {
int value_offset;
int name_offset;
+ int namespace_offset;
};
#else
+#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \
+ static const struct kernel_symbol __ksymtab_##sym##__##ns \
+ asm("__ksymtab_" #sym NS_SEPARATOR #ns) \
+ __attribute__((section("___ksymtab" sec "+" #sym), used)) \
+ __aligned(sizeof(void *)) \
+ = { (unsigned long)&sym, __kstrtab_##sym, __kstrtab_ns_##sym }
+
#define __KSYMTAB_ENTRY(sym, sec) \
static const struct kernel_symbol __ksymtab_##sym \
+ asm("__ksymtab_" #sym) \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
- = { (unsigned long)&sym, __kstrtab_##sym }
+ __aligned(sizeof(void *)) \
+ = { (unsigned long)&sym, __kstrtab_##sym, NULL }
struct kernel_symbol {
unsigned long value;
const char *name;
+ const char *namespace;
};
#endif
-/* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL(sym, sec) \
+#ifdef __GENKSYMS__
+
+#define ___EXPORT_SYMBOL(sym,sec) __GENKSYMS_EXPORT_SYMBOL(sym)
+#define ___EXPORT_SYMBOL_NS(sym,sec,ns) __GENKSYMS_EXPORT_SYMBOL(sym)
+
+#else
+
+#define ___export_symbol_common(sym, sec) \
extern typeof(sym) sym; \
- __CRC_SYMBOL(sym, sec) \
+ __CRC_SYMBOL(sym, sec); \
static const char __kstrtab_##sym[] \
__attribute__((section("__ksymtab_strings"), used, aligned(1))) \
- = #sym; \
+ = #sym \
+
+/* For every exported symbol, place a struct in the __ksymtab section */
+#define ___EXPORT_SYMBOL_NS(sym, sec, ns) \
+ ___export_symbol_common(sym, sec); \
+ static const char __kstrtab_ns_##sym[] \
+ __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
+ = #ns; \
+ __KSYMTAB_ENTRY_NS(sym, sec, ns)
+
+#define ___EXPORT_SYMBOL(sym, sec) \
+ ___export_symbol_common(sym, sec); \
__KSYMTAB_ENTRY(sym, sec)
-#if defined(__DISABLE_EXPORTS)
+#endif
+
+#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS)
/*
* Allow symbol exports to be disabled completely so that C code may
* be reused in other execution contexts such as the UEFI stub or the
* decompressor.
*/
+#define __EXPORT_SYMBOL_NS(sym, sec, ns)
#define __EXPORT_SYMBOL(sym, sec)
#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
@@ -116,38 +157,43 @@ struct kernel_symbol {
#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec)
#define __cond_export_sym_0(sym, sec) /* nothing */
+#define __EXPORT_SYMBOL_NS(sym, sec, ns) \
+ __ksym_marker(sym); \
+ __cond_export_ns_sym(sym, sec, ns, __is_defined(__KSYM_##sym))
+#define __cond_export_ns_sym(sym, sec, ns, conf) \
+ ___cond_export_ns_sym(sym, sec, ns, conf)
+#define ___cond_export_ns_sym(sym, sec, ns, enabled) \
+ __cond_export_ns_sym_##enabled(sym, sec, ns)
+#define __cond_export_ns_sym_1(sym, sec, ns) ___EXPORT_SYMBOL_NS(sym, sec, ns)
+#define __cond_export_ns_sym_0(sym, sec, ns) /* nothing */
+
#else
-#define __EXPORT_SYMBOL ___EXPORT_SYMBOL
-#endif
-#define EXPORT_SYMBOL(sym) \
- __EXPORT_SYMBOL(sym, "")
+#define __EXPORT_SYMBOL_NS(sym,sec,ns) ___EXPORT_SYMBOL_NS(sym,sec,ns)
+#define __EXPORT_SYMBOL(sym,sec) ___EXPORT_SYMBOL(sym,sec)
-#define EXPORT_SYMBOL_GPL(sym) \
- __EXPORT_SYMBOL(sym, "_gpl")
+#endif /* CONFIG_MODULES */
-#define EXPORT_SYMBOL_GPL_FUTURE(sym) \
- __EXPORT_SYMBOL(sym, "_gpl_future")
+#ifdef DEFAULT_SYMBOL_NAMESPACE
+#undef __EXPORT_SYMBOL
+#define __EXPORT_SYMBOL(sym, sec) \
+ __EXPORT_SYMBOL_NS(sym, sec, DEFAULT_SYMBOL_NAMESPACE)
+#endif
+
+#define EXPORT_SYMBOL(sym) __EXPORT_SYMBOL(sym, "")
+#define EXPORT_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_gpl")
+#define EXPORT_SYMBOL_GPL_FUTURE(sym) __EXPORT_SYMBOL(sym, "_gpl_future")
+#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL_NS(sym, "", ns)
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL_NS(sym, "_gpl", ns)
#ifdef CONFIG_UNUSED_SYMBOLS
-#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused")
-#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl")
+#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused")
+#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl")
#else
#define EXPORT_UNUSED_SYMBOL(sym)
#define EXPORT_UNUSED_SYMBOL_GPL(sym)
#endif
-#endif /* __GENKSYMS__ */
-
-#else /* !CONFIG_MODULES... */
-
-#define EXPORT_SYMBOL(sym)
-#define EXPORT_SYMBOL_GPL(sym)
-#define EXPORT_SYMBOL_GPL_FUTURE(sym)
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
-
-#endif /* CONFIG_MODULES */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_EXPORT_H */
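With symbol namespaces, each export records an extra string (the __kstrtab_ns_* entry above) that the module loader can check against the namespaces a consumer declares. A sketch of both sides; MYDRV_NS and the function are made up, and the matching MODULE_IMPORT_NS() macro is added to <linux/module.h> elsewhere in this series:

/* exporting module */
int mydrv_do_thing(void);
EXPORT_SYMBOL_NS_GPL(mydrv_do_thing, MYDRV_NS);

/* consuming module: must import the namespace for the symbol to resolve */
MODULE_IMPORT_NS(MYDRV_NS);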
diff --git a/include/linux/extable.h b/include/linux/extable.h
index 41c5b3a25f67..81ecfaa83ad3 100644
--- a/include/linux/extable.h
+++ b/include/linux/extable.h
@@ -19,6 +19,8 @@ void trim_init_extable(struct module *m);
/* Given an address, look for it in the exception tables */
const struct exception_table_entry *search_exception_tables(unsigned long add);
+const struct exception_table_entry *
+search_kernel_exception_table(unsigned long addr);
#ifdef CONFIG_MODULES
/* For extable.c to search modules' exception tables. */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 65559900d4d7..284738996028 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -36,11 +36,17 @@
#define F2FS_MAX_QUOTAS 3
+#define F2FS_ENC_UTF8_12_1 1
+#define F2FS_ENC_STRICT_MODE_FL (1 << 0)
+#define f2fs_has_strict_mode(sbi) \
+ (sbi->s_encoding_flags & F2FS_ENC_STRICT_MODE_FL)
+
#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */
#define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */
#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
+#define F2FS_IO_ALIGNED(sbi) (F2FS_IO_SIZE(sbi) > 1)
/* This flag is used by node and meta inodes, and by recovery */
#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
@@ -109,7 +115,9 @@ struct f2fs_super_block {
struct f2fs_device devs[MAX_DEVICES]; /* device list */
__le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
__u8 hot_ext_count; /* # of hot file extension */
- __u8 reserved[310]; /* valid reserved region */
+ __le16 s_encoding; /* Filename charset encoding */
+ __le16 s_encoding_flags; /* Filename charset encoding flags */
+ __u8 reserved[306]; /* valid reserved region */
__le32 crc; /* checksum of superblock */
} __packed;
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 303771264644..756706b666a1 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -135,10 +135,6 @@ struct fb_cursor_user {
/* A display blank is requested */
#define FB_EVENT_BLANK 0x09
-/* A hardware display blank early change occurred */
-#define FB_EARLY_EVENT_BLANK 0x10
-/* A hardware display blank revert early change occurred */
-#define FB_R_EARLY_EVENT_BLANK 0x11
struct fb_event {
struct fb_info *info;
@@ -721,8 +717,6 @@ extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var);
extern const unsigned char *fb_firmware_edid(struct device *device);
extern void fb_edid_to_monspecs(unsigned char *edid,
struct fb_monspecs *specs);
-extern void fb_edid_add_monspecs(unsigned char *edid,
- struct fb_monspecs *specs);
extern void fb_destroy_modedb(struct fb_videomode *modedb);
extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
@@ -796,7 +790,6 @@ struct dmt_videomode {
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[65];
extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ffe35d97afcb..e0d909d35763 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -429,6 +429,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
* @i_pages: Cached pages.
* @gfp_mask: Memory allocation flags to use for allocating pages.
* @i_mmap_writable: Number of VM_SHARED mappings.
+ * @nr_thps: Number of THPs in the pagecache (non-shmem only).
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
* @nrpages: Number of page entries, protected by the i_pages lock.
@@ -446,6 +447,10 @@ struct address_space {
struct xarray i_pages;
gfp_t gfp_mask;
atomic_t i_mmap_writable;
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ /* number of thp, only for non-shmem files */
+ atomic_t nr_thps;
+#endif
struct rb_root_cached i_mmap;
struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
@@ -732,6 +737,8 @@ struct inode {
void *i_private; /* fs or device private pointer */
} __randomize_layout;
+struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
+
static inline unsigned int i_blocksize(const struct inode *node)
{
return (1 << node->i_blkbits);
@@ -1161,6 +1168,11 @@ extern void lease_get_mtime(struct inode *, struct timespec64 *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
extern int lease_modify(struct file_lock *, int, struct list_head *);
+
+struct notifier_block;
+extern int lease_register_notifier(struct notifier_block *);
+extern void lease_unregister_notifier(struct notifier_block *);
+
struct files_struct;
extern void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files);
@@ -1458,6 +1470,9 @@ struct super_block {
/* Granularity of c/m/atime in ns (cannot be worse than a second) */
u32 s_time_gran;
+ /* Time limits for c/m/atime in seconds */
+ time64_t s_time_min;
+ time64_t s_time_max;
#ifdef CONFIG_FSNOTIFY
__u32 s_fsnotify_mask;
struct fsnotify_mark_connector __rcu *s_fsnotify_marks;
@@ -2793,6 +2808,33 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
return errseq_sample(&mapping->wb_err);
}
+static inline int filemap_nr_thps(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ return atomic_read(&mapping->nr_thps);
+#else
+ return 0;
+#endif
+}
+
+static inline void filemap_nr_thps_inc(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ atomic_inc(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(1);
+#endif
+}
+
+static inline void filemap_nr_thps_dec(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ atomic_dec(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(1);
+#endif
+}
+
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
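The new s_time_min/s_time_max fields and timestamp_truncate() let the VFS clamp timestamps to the range a filesystem can actually store on disk. A sketch with illustrative 32-bit limits; the myfs_* helpers are hypothetical:

static void myfs_fill_time_limits(struct super_block *sb)
{
	sb->s_time_min = S32_MIN;	/* e.g. a filesystem with 32-bit epochs */
	sb->s_time_max = S32_MAX;
}

static void myfs_set_mtime(struct inode *inode, struct timespec64 ts)
{
	inode->i_mtime = timestamp_truncate(ts, inode);
}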
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 84a5eaa09f19..e5c14e2c53d3 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -95,7 +95,6 @@ struct fs_context {
const struct cred *cred; /* The mounter's credentials */
struct fc_log *log; /* Logging buffer */
const char *source; /* The source name (eg. dev path) */
- const char *subtype; /* The subtype to set on the superblock */
void *security; /* Linux S&M options */
void *s_fs_info; /* Proposed s_fs_info */
unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
@@ -141,6 +140,7 @@ extern void put_fs_context(struct fs_context *fc);
*/
enum vfs_get_super_keying {
vfs_get_single_super, /* Only one such superblock may exist */
+ vfs_get_single_reconf_super, /* As above, but reconfigure if it exists */
vfs_get_keyed_super, /* Superblocks with different s_fs_info keys may exist */
vfs_get_independent_super, /* Multiple independent superblocks may exist */
};
@@ -155,6 +155,9 @@ extern int get_tree_nodev(struct fs_context *fc,
extern int get_tree_single(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
+extern int get_tree_single_reconf(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
extern int get_tree_keyed(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc),
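get_tree_single_reconf() declared above pairs with the new vfs_get_single_reconf_super keying: like get_tree_single() it allows only one superblock, but an existing superblock is reconfigured rather than refused. A hedged sketch of a ->get_tree() implementation using it; examplefs and its fill_super are invented for illustration:

#include <linux/fs.h>
#include <linux/fs_context.h>

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        /* set up sb->s_op, the root inode, etc. */
        return 0;
}

static int examplefs_get_tree(struct fs_context *fc)
{
        /* single superblock; later mounts reconfigure the existing one */
        return get_tree_single_reconf(fc, examplefs_fill_super);
}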
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 2de3b2ddd19a..1915bdba2fad 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -475,6 +475,8 @@ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
/* free mark */
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
+/* Wait until all marks queued for destruction are destroyed */
+extern void fsnotify_wait_marks_destroyed(void);
/* run all the marks in a group, and clear all of the marks attached to given object type */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type);
/* run all the marks in a group, and clear all of the vfsmount marks */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f33881688f42..fb07b503dc45 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -510,18 +510,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
- int node);
+ int node, bool hugepage);
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+ alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
+ alloc_pages(gfp_mask, order)
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
+ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
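alloc_pages_vma() gains a hugepage hint above and alloc_hugepage_vma() wraps it for the THP case. A sketch of the intended call pattern with the surrounding fault-path details omitted; the function name and the gfp mask choice are illustrative:

#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/mm.h>

static struct page *example_alloc_thp(struct vm_area_struct *vma,
                                      unsigned long haddr)
{
        /* hugepage=true lets the allocator apply THP-specific NUMA policy */
        return alloc_hugepage_vma(GFP_TRANSHUGE, vma, haddr, HPAGE_PMD_ORDER);
}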
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d770ab1a0479..cd41f209043f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1154,29 +1154,32 @@ int hid_pidff_init(struct hid_device *hid);
#define hid_pidff_init NULL
#endif
-#define dbg_hid(format, arg...) \
+#define dbg_hid(fmt, ...) \
do { \
if (hid_debug) \
- printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \
+ printk(KERN_DEBUG "%s: " fmt, __FILE__, ##__VA_ARGS__); \
} while (0)
-#define hid_printk(level, hid, fmt, arg...) \
- dev_printk(level, &(hid)->dev, fmt, ##arg)
-#define hid_emerg(hid, fmt, arg...) \
- dev_emerg(&(hid)->dev, fmt, ##arg)
-#define hid_crit(hid, fmt, arg...) \
- dev_crit(&(hid)->dev, fmt, ##arg)
-#define hid_alert(hid, fmt, arg...) \
- dev_alert(&(hid)->dev, fmt, ##arg)
-#define hid_err(hid, fmt, arg...) \
- dev_err(&(hid)->dev, fmt, ##arg)
-#define hid_notice(hid, fmt, arg...) \
- dev_notice(&(hid)->dev, fmt, ##arg)
-#define hid_warn(hid, fmt, arg...) \
- dev_warn(&(hid)->dev, fmt, ##arg)
-#define hid_info(hid, fmt, arg...) \
- dev_info(&(hid)->dev, fmt, ##arg)
-#define hid_dbg(hid, fmt, arg...) \
- dev_dbg(&(hid)->dev, fmt, ##arg)
+#define hid_err(hid, fmt, ...) \
+ dev_err(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_notice(hid, fmt, ...) \
+ dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn(hid, fmt, ...) \
+ dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_info(hid, fmt, ...) \
+ dev_info(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_dbg(hid, fmt, ...) \
+ dev_dbg(&(hid)->dev, fmt, ##__VA_ARGS__)
+
+#define hid_err_once(hid, fmt, ...) \
+ dev_err_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_notice_once(hid, fmt, ...) \
+ dev_notice_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn_once(hid, fmt, ...) \
+ dev_warn_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_info_once(hid, fmt, ...) \
+ dev_info_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_dbg_once(hid, fmt, ...) \
+ dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__)
#endif
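The hid_*_once() wrappers added above print a given message at most once per device, mirroring the dev_*_once() helpers. An illustrative use in a driver's raw_event handler; the driver and the length check are made up:

#include <linux/hid.h>

static int example_raw_event(struct hid_device *hdev, struct hid_report *report,
                             u8 *data, int size)
{
        if (size < 2) {
                /* complain once instead of flooding the log on every report */
                hid_warn_once(hdev, "short report: %d bytes\n", size);
                return 0;
        }

        /* ... decode the report ... */
        return 0;
}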
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 7ef56dc18050..3fec513b9c00 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -84,15 +84,12 @@
* @notifiers: count of active mmu notifiers
*/
struct hmm {
- struct mm_struct *mm;
- struct kref kref;
+ struct mmu_notifier mmu_notifier;
spinlock_t ranges_lock;
struct list_head ranges;
struct list_head mirrors;
- struct mmu_notifier mmu_notifier;
struct rw_semaphore mirrors_sem;
wait_queue_head_t wq;
- struct rcu_head rcu;
long notifiers;
};
@@ -158,13 +155,11 @@ enum hmm_pfn_value_e {
* @values: pfn value for some special case (none, special, error, ...)
* @default_flags: default flags for the range (write, read, ... see hmm doc)
* @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
- * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT)
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array did not change since it has been filled by an HMM function
*/
struct hmm_range {
struct hmm *hmm;
- struct vm_area_struct *vma;
struct list_head list;
unsigned long start;
unsigned long end;
@@ -173,32 +168,11 @@ struct hmm_range {
const uint64_t *values;
uint64_t default_flags;
uint64_t pfn_flags_mask;
- uint8_t page_shift;
uint8_t pfn_shift;
bool valid;
};
/*
- * hmm_range_page_shift() - return the page shift for the range
- * @range: range being queried
- * Return: page shift (page size = 1 << page shift) for the range
- */
-static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
-{
- return range->page_shift;
-}
-
-/*
- * hmm_range_page_size() - return the page size for the range
- * @range: range being queried
- * Return: page size for the range in bytes
- */
-static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
-{
- return 1UL << hmm_range_page_shift(range);
-}
-
-/*
* hmm_range_wait_until_valid() - wait for range to be valid
* @range: range affected by invalidation to wait on
* @timeout: time out for wait in ms (ie abort wait after that period of time)
@@ -291,40 +265,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
}
/*
- * Old API:
- * hmm_pfn_to_page()
- * hmm_pfn_to_pfn()
- * hmm_pfn_from_page()
- * hmm_pfn_from_pfn()
- *
- * This are the OLD API please use new API, it is here to avoid cross-tree
- * merge painfullness ie we convert things to new API in stages.
- */
-static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
- uint64_t pfn)
-{
- return hmm_device_entry_to_page(range, pfn);
-}
-
-static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
- uint64_t pfn)
-{
- return hmm_device_entry_to_pfn(range, pfn);
-}
-
-static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
- struct page *page)
-{
- return hmm_device_entry_from_page(range, page);
-}
-
-static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
- unsigned long pfn)
-{
- return hmm_device_entry_from_pfn(range, pfn);
-}
-
-/*
* Mirroring: how to synchronize device page table with CPU page table.
*
* A device driver that is participating in HMM mirroring must always
@@ -375,29 +315,6 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
struct hmm_mirror;
/*
- * enum hmm_update_event - type of update
- * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
- */
-enum hmm_update_event {
- HMM_UPDATE_INVALIDATE,
-};
-
-/*
- * struct hmm_update - HMM update information for callback
- *
- * @start: virtual start address of the range to update
- * @end: virtual end address of the range to update
- * @event: event triggering the update (what is happening)
- * @blockable: can the callback block/sleep ?
- */
-struct hmm_update {
- unsigned long start;
- unsigned long end;
- enum hmm_update_event event;
- bool blockable;
-};
-
-/*
* struct hmm_mirror_ops - HMM mirror device operations callback
*
* @update: callback to update range on a device
@@ -417,9 +334,9 @@ struct hmm_mirror_ops {
/* sync_cpu_device_pagetables() - synchronize page tables
*
* @mirror: pointer to struct hmm_mirror
- * @update: update information (see struct hmm_update)
- * Return: -EAGAIN if update.blockable false and callback need to
- * block, 0 otherwise.
+ * @update: update information (see struct mmu_notifier_range)
+ * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false
+ * and callback needs to block, 0 otherwise.
*
* This callback ultimately originates from mmu_notifiers when the CPU
* page table is updated. The device driver must update its page table
@@ -430,8 +347,9 @@ struct hmm_mirror_ops {
* page tables are completely updated (TLBs flushed, etc); this is a
* synchronous call.
*/
- int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
- const struct hmm_update *update);
+ int (*sync_cpu_device_pagetables)(
+ struct hmm_mirror *mirror,
+ const struct mmu_notifier_range *update);
};
/*
@@ -457,20 +375,24 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror);
/*
* Please see Documentation/vm/hmm.rst for how to use the range API.
*/
-int hmm_range_register(struct hmm_range *range,
- struct hmm_mirror *mirror,
- unsigned long start,
- unsigned long end,
- unsigned page_shift);
+int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror);
void hmm_range_unregister(struct hmm_range *range);
-long hmm_range_snapshot(struct hmm_range *range);
-long hmm_range_fault(struct hmm_range *range, bool block);
+
+/*
+ * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case.
+ */
+#define HMM_FAULT_ALLOW_RETRY (1 << 0)
+
+/* Don't fault in missing PTEs, just snapshot the current state. */
+#define HMM_FAULT_SNAPSHOT (1 << 1)
+
+long hmm_range_fault(struct hmm_range *range, unsigned int flags);
+
long hmm_range_dma_map(struct hmm_range *range,
struct device *device,
dma_addr_t *daddrs,
- bool block);
+ unsigned int flags);
long hmm_range_dma_unmap(struct hmm_range *range,
- struct vm_area_struct *vma,
struct device *device,
dma_addr_t *daddrs,
bool dirty);
@@ -484,13 +406,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
-/* Below are for HMM internal use only! Not to be used by device driver! */
-static inline void hmm_mm_init(struct mm_struct *mm)
-{
- mm->hmm = NULL;
-}
-#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
-static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
#endif /* LINUX_HMM_H */
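After the changes above, hmm_range_register() takes only the range and mirror (start/end live in struct hmm_range) and hmm_range_fault() takes a flags word. A hedged sketch of snapshotting a range; the caller is assumed to have filled in range->pfns, the flags/values tables and pfn_shift, and the wait/retry handling against invalidation is omitted:

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/rwsem.h>

static long example_snapshot(struct hmm_mirror *mirror, struct mm_struct *mm,
                             struct hmm_range *range,
                             unsigned long start, unsigned long end)
{
        long ret;

        range->start = start;
        range->end = end;

        ret = hmm_range_register(range, mirror);
        if (ret)
                return ret;

        down_read(&mm->mmap_sem);
        /* HMM_FAULT_SNAPSHOT: read the current state, do not fault in PTEs */
        ret = hmm_range_fault(range, HMM_FAULT_SNAPSHOT);
        up_read(&mm->mmap_sem);

        hmm_range_unregister(range);
        return ret;
}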
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 45ede62aa85b..61c9ffd89b05 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -267,6 +267,15 @@ static inline bool thp_migration_supported(void)
return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
+static inline struct list_head *page_deferred_list(struct page *page)
+{
+ /*
+ * Global or memcg deferred list in the second tail pages is
+ * occupied by compound_head.
+ */
+ return &page[2].deferred_list;
+}
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edfca4278319..53fc34f930d0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -454,7 +454,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
static inline struct hstate *page_hstate(struct page *page)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
- return size_to_hstate(PAGE_SIZE << compound_order(page));
+ return size_to_hstate(page_size(page));
}
static inline unsigned hstate_index_to_shift(unsigned index)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 2afe6fdc1dda..b4a017093b69 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -245,7 +245,10 @@ struct vmbus_channel_offer {
} pipe;
} u;
/*
- * The sub_channel_index is defined in win8.
+ * The sub_channel_index is defined in Win8: a value of zero means a
+ * primary channel and a value of non-zero means a sub-channel.
+ *
+ * Before Win8, the field is reserved, meaning it's always zero.
*/
u16 sub_channel_index;
u16 reserved3;
@@ -423,6 +426,9 @@ enum vmbus_channel_message_type {
CHANNELMSG_COUNT
};
+/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
+#define INVALID_RELID U32_MAX
+
struct vmbus_channel_message_header {
enum vmbus_channel_message_type msgtype;
u32 padding;
@@ -934,6 +940,11 @@ static inline bool is_hvsock_channel(const struct vmbus_channel *c)
VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}
+static inline bool is_sub_channel(const struct vmbus_channel *c)
+{
+ return c->offermsg.offer.sub_channel_index != 0;
+}
+
static inline void set_channel_affinity_state(struct vmbus_channel *c,
enum hv_numa_policy policy)
{
@@ -1149,6 +1160,9 @@ struct hv_driver {
int (*remove)(struct hv_device *);
void (*shutdown)(struct hv_device *);
+ int (*suspend)(struct hv_device *);
+ int (*resume)(struct hv_device *);
+
};
/* Base device object */
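The new suspend/resume callbacks in struct hv_driver above are the hook points for VMBus hibernation support. A sketch of a driver wiring them up; the driver name is hypothetical, and probe/remove plus the device id table are omitted:

#include <linux/hyperv.h>

static int example_hv_suspend(struct hv_device *dev)
{
        /* quiesce the device, e.g. close its VMBus channel */
        return 0;
}

static int example_hv_resume(struct hv_device *dev)
{
        /* reopen the channel and restore device state */
        return 0;
}

static struct hv_driver example_hv_driver = {
        .name    = "example_hv",
        .suspend = example_hv_suspend,
        .resume  = example_hv_resume,
};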
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index c0a78c069117..1361637c369d 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -473,7 +473,7 @@ extern struct i2c_client *
devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address);
extern struct i2c_client *
-i2c_new_secondary_device(struct i2c_client *client,
+i2c_new_ancillary_device(struct i2c_client *client,
const char *name,
u16 default_addr);
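The rename above turns i2c_new_secondary_device() into i2c_new_ancillary_device(), and as part of the same series the helper reports failure via ERR_PTR rather than NULL. A hedged sketch of claiming a second address; the "addr" alias name and the 0x48 fallback are illustrative:

#include <linux/err.h>
#include <linux/i2c.h>

static int example_claim_second_address(struct i2c_client *client)
{
        struct i2c_client *ancillary;

        /* resolve the "addr" alias from firmware, fall back to 0x48 */
        ancillary = i2c_new_ancillary_device(client, "addr", 0x48);
        if (IS_ERR(ancillary))
                return PTR_ERR(ancillary);

        /* ... use it, and i2c_unregister_device() it on teardown ... */
        return 0;
}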
diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
index ea1f50ce2e49..bb331e6356a9 100644
--- a/include/linux/iio/common/cros_ec_sensors_core.h
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -10,7 +10,8 @@
#include <linux/iio/iio.h>
#include <linux/irqreturn.h>
-#include <linux/mfd/cros_ec.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
enum {
CROS_EC_SENSOR_X,
diff --git a/include/linux/ima.h b/include/linux/ima.h
index a20ad398d260..1c37f17f7203 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -131,4 +131,13 @@ static inline int ima_inode_removexattr(struct dentry *dentry,
return 0;
}
#endif /* CONFIG_IMA_APPRAISE */
+
+#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING)
+extern bool ima_appraise_signature(enum kernel_read_file_id func);
+#else
+static inline bool ima_appraise_signature(enum kernel_read_file_id func)
+{
+ return false;
+}
+#endif /* CONFIG_IMA_APPRAISE && CONFIG_INTEGRITY_TRUSTED_KEYRING */
#endif /* _LINUX_IMA_H */
diff --git a/include/linux/input.h b/include/linux/input.h
index 510e78558c10..94f277cd806a 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -21,6 +21,8 @@
#include <linux/timer.h>
#include <linux/mod_devicetable.h>
+struct input_dev_poller;
+
/**
* struct input_value - input value representation
* @type: type of value (EV_KEY, EV_ABS, etc)
@@ -33,6 +35,13 @@ struct input_value {
__s32 value;
};
+enum input_clock_type {
+ INPUT_CLK_REAL = 0,
+ INPUT_CLK_MONO,
+ INPUT_CLK_BOOT,
+ INPUT_CLK_MAX
+};
+
/**
* struct input_dev - represents an input device
* @name: name of the device
@@ -64,6 +73,8 @@ struct input_value {
* not sleep
* @ff: force feedback structure associated with the device if device
* supports force feedback effects
+ * @poller: poller structure associated with the device if device is
+ * set up to use polling mode
* @repeat_key: stores key code of the last key pressed; used to implement
* software autorepeat
* @timer: timer for software autorepeat
@@ -114,6 +125,8 @@ struct input_value {
* @vals: array of values queued in the current frame
* @devres_managed: indicates that devices is managed with devres framework
* and needs not be explicitly unregistered or freed.
+ * @timestamp: storage for a timestamp set by input_set_timestamp called
+ * by a driver
*/
struct input_dev {
const char *name;
@@ -147,6 +160,8 @@ struct input_dev {
struct ff_device *ff;
+ struct input_dev_poller *poller;
+
unsigned int repeat_key;
struct timer_list timer;
@@ -184,6 +199,8 @@ struct input_dev {
struct input_value *vals;
bool devres_managed;
+
+ ktime_t timestamp[INPUT_CLK_MAX];
};
#define to_input_dev(d) container_of(d, struct input_dev, dev)
@@ -361,6 +378,12 @@ void input_unregister_device(struct input_dev *);
void input_reset_device(struct input_dev *);
+int input_setup_polling(struct input_dev *dev,
+ void (*poll_fn)(struct input_dev *dev));
+void input_set_poll_interval(struct input_dev *dev, unsigned int interval);
+void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval);
+void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval);
+
int __must_check input_register_handler(struct input_handler *);
void input_unregister_handler(struct input_handler *);
@@ -382,6 +405,9 @@ void input_close_device(struct input_handle *);
int input_flush_device(struct input_handle *handle, struct file *file);
+void input_set_timestamp(struct input_dev *dev, ktime_t timestamp);
+ktime_t *input_get_timestamp(struct input_dev *dev);
+
void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
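The declarations above add generic polling support directly to struct input_dev (input_setup_polling() plus the poll-interval setters) and let drivers supply their own event timestamps. A hedged sketch of registering a polled device; the device name, key and 100 ms interval are illustrative:

#include <linux/input.h>
#include <linux/ktime.h>

static void example_poll(struct input_dev *input)
{
        /* sample the hardware, timestamp and report the result */
        input_set_timestamp(input, ktime_get());
        input_report_key(input, KEY_POWER, 0);
        input_sync(input);
}

static int example_register(struct device *parent)
{
        struct input_dev *input;
        int error;

        input = devm_input_allocate_device(parent);
        if (!input)
                return -ENOMEM;

        input->name = "example-polled-keys";
        input_set_capability(input, EV_KEY, KEY_POWER);

        error = input_setup_polling(input, example_poll);
        if (error)
                return error;

        input_set_poll_interval(input, 100);    /* milliseconds */

        return input_register_device(input);
}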
diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h
deleted file mode 100644
index 7e5b7e978e8a..000000000000
--- a/include/linux/input/bu21013.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- */
-
-#ifndef _BU21013_H
-#define _BU21013_H
-
-/**
- * struct bu21013_platform_device - Handle the platform data
- * @touch_x_max: touch x max
- * @touch_y_max: touch y max
- * @cs_pin: chip select pin
- * @touch_pin: touch gpio pin
- * @ext_clk: external clock flag
- * @x_flip: x flip flag
- * @y_flip: y flip flag
- * @wakeup: wakeup flag
- *
- * This is used to handle the platform data
- */
-struct bu21013_platform_device {
- int touch_x_max;
- int touch_y_max;
- unsigned int cs_pin;
- unsigned int touch_pin;
- bool ext_clk;
- bool x_flip;
- bool y_flip;
- bool wakeup;
-};
-
-#endif
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index 855476145fe1..aaa8a0767aa3 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -30,26 +30,8 @@
\
/* Callbacks for augmented rbtree insert and remove */ \
\
-static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
-{ \
- ITTYPE max = ITLAST(node), subtree_last; \
- if (node->ITRB.rb_left) { \
- subtree_last = rb_entry(node->ITRB.rb_left, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- if (node->ITRB.rb_right) { \
- subtree_last = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- return max; \
-} \
- \
-RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
- ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
+RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \
+ ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \
\
/* Insert / remove interval nodes from the tree */ \
\
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index bc499ceae392..7aa5d6117936 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -188,10 +188,14 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
*/
#define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */
#define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */
-typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
- unsigned flags);
+
+struct iomap_dio_ops {
+ int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
+ unsigned flags);
+};
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops, iomap_dio_end_io_t end_io);
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops);
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
#ifdef CONFIG_SWAP
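iomap_dio_rw() above now takes a struct iomap_dio_ops instead of a bare end_io function pointer, and the completion callback receives the I/O error separately from the size. A sketch of the new calling convention; the filesystem, its iomap_ops and the completion work are placeholders:

#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uio.h>

static int examplefs_dio_end_io(struct kiocb *iocb, ssize_t size, int error,
                                unsigned flags)
{
        if (error)
                return error;
        /* e.g. convert unwritten extents when IOMAP_DIO_UNWRITTEN is set */
        return 0;
}

static const struct iomap_dio_ops examplefs_dio_ops = {
        .end_io = examplefs_dio_end_io,
};

static ssize_t examplefs_dio_read(struct kiocb *iocb, struct iov_iter *to,
                                  const struct iomap_ops *ops)
{
        return iomap_dio_rw(iocb, to, ops, &examplefs_dio_ops);
}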
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 5b6a7121c9f0..7bddddfc76d6 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -297,6 +297,8 @@ static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size);
+struct resource *request_free_mem_region(struct resource *base,
+ unsigned long size, const char *name);
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index df03825ad1a1..603fbc4e2f70 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1410,8 +1410,6 @@ extern int jbd2_journal_clear_err (journal_t *);
extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
extern int jbd2_journal_force_commit(journal_t *);
extern int jbd2_journal_force_commit_nested(journal_t *);
-extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
-extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
extern int jbd2_journal_inode_ranged_write(handle_t *handle,
struct jbd2_inode *inode, loff_t start_byte,
loff_t length);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 4fa360a13c1e..d83d403dac2e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -217,7 +217,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
* might_sleep - annotation for functions that can sleep
*
* this macro will print a stack trace if it is executed in an atomic
- * context (spinlock, irq-handler, ...).
+ * context (spinlock, irq-handler, ...). Additional sections where blocking is
+ * not allowed can be annotated with non_block_start() and non_block_end()
+ * pairs.
*
* This is a useful debugging help to be able to catch problems early and not
* be bitten later when the calling function happens to sleep when it is not
@@ -233,6 +235,23 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
# define cant_sleep() \
do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
+/**
+ * non_block_start - annotate the start of section where sleeping is prohibited
+ *
+ * This is on behalf of the oom reaper, specifically when it is calling the mmu
+ * notifiers. The problem is that if the notifier were to block on, for example,
+ * mutex_lock() and if the process which holds that mutex were to perform a
+ * sleeping memory allocation, the oom reaper is now blocked on completion of
+ * that memory allocation. Other blocking calls like wait_event() pose similar
+ * issues.
+ */
+# define non_block_start() (current->non_block_count++)
+/**
+ * non_block_end - annotate the end of section where sleeping is prohibited
+ *
+ * Closes a section opened by non_block_start().
+ */
+# define non_block_end() WARN_ON(current->non_block_count-- == 0)
#else
static inline void ___might_sleep(const char *file, int line,
int preempt_offset) { }
@@ -241,6 +260,8 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
# define might_sleep() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
+# define non_block_start() do { } while (0)
+# define non_block_end() do { } while (0)
#endif
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
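non_block_start()/non_block_end() above mark regions where blocking would deadlock the oom reaper, so even a might_sleep() inside them is flagged when sleep debugging (CONFIG_DEBUG_ATOMIC_SLEEP) is enabled. A minimal sketch of the bracketing pattern; the work in the middle is a placeholder:

#include <linux/kernel.h>

static void example_nonblocking_section(void)
{
        non_block_start();
        /*
         * Nothing in here may sleep; calling a sleeping primitive here
         * trips the might_sleep() machinery on debug kernels.
         */
        non_block_end();
}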
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index f0b809258ed3..1776eb2e43a4 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -125,7 +125,7 @@ typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
unsigned long cmdline_len);
typedef int (kexec_cleanup_t)(void *loader_data);
-#ifdef CONFIG_KEXEC_VERIFY_SIG
+#ifdef CONFIG_KEXEC_SIG
typedef int (kexec_verify_sig_t)(const char *kernel_buf,
unsigned long kernel_len);
#endif
@@ -134,7 +134,7 @@ struct kexec_file_ops {
kexec_probe_t *probe;
kexec_load_t *load;
kexec_cleanup_t *cleanup;
-#ifdef CONFIG_KEXEC_VERIFY_SIG
+#ifdef CONFIG_KEXEC_SIG
kexec_verify_sig_t *verify_sig;
#endif
};
@@ -183,6 +183,8 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len);
void * __weak arch_kexec_kernel_image_load(struct kimage *image);
int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index fbf144aaa749..b072aeb1fd78 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -326,8 +326,10 @@ extern atomic_t kgdb_active;
(raw_smp_processor_id() == atomic_read(&kgdb_active))
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
+extern void kgdb_panic(const char *msg);
#else /* ! CONFIG_KGDB */
#define in_dbg_master() (0)
#define dbg_late_init()
+static inline void kgdb_panic(const char *msg) {}
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 082d1d2a5216..bc45ea1efbf7 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -15,6 +15,14 @@ extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
unsigned long vm_flags);
+#ifdef CONFIG_SHMEM
+extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
+#else
+static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr)
+{
+}
+#endif
#define khugepaged_enabled() \
(transparent_hugepage_flags & \
@@ -73,6 +81,10 @@ static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
{
return 0;
}
+static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr)
+{
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_KHUGEPAGED_H */
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 851eee8fff25..238fb1dfed98 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -41,16 +41,6 @@ struct lcd_ops {
/* Get the LCD panel power status (0: full on, 1..3: controller
power on, flat panel power off, 4: full off), see FB_BLANK_XXX */
int (*get_power)(struct lcd_device *);
- /*
- * Enable or disable power to the LCD(0: on; 4: off, see FB_BLANK_XXX)
- * and this callback would be called proir to fb driver's callback.
- *
- * P.S. note that if early_set_power is not NULL then early fb notifier
- * would be registered.
- */
- int (*early_set_power)(struct lcd_device *, int power);
- /* revert the effects of the early blank event. */
- int (*r_early_set_power)(struct lcd_device *, int power);
/* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */
int (*set_power)(struct lcd_device *, int power);
/* Get the current contrast setting (0-max_contrast) */
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 7a64b3ddb408..b6eddf912568 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -160,8 +160,11 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
}
-enum nvdimm_security_state {
- NVDIMM_SECURITY_ERROR = -1,
+/*
+ * Note that separate bits for locked + unlocked are defined so that
+ * 'flags == 0' corresponds to an error / not-supported state.
+ */
+enum nvdimm_security_bits {
NVDIMM_SECURITY_DISABLED,
NVDIMM_SECURITY_UNLOCKED,
NVDIMM_SECURITY_LOCKED,
@@ -182,7 +185,7 @@ enum nvdimm_passphrase_type {
};
struct nvdimm_security_ops {
- enum nvdimm_security_state (*state)(struct nvdimm *nvdimm,
+ unsigned long (*get_flags)(struct nvdimm *nvdimm,
enum nvdimm_passphrase_type pass_type);
int (*freeze)(struct nvdimm *nvdimm);
int (*change_key)(struct nvdimm *nvdimm,
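The security ->state() callback above becomes ->get_flags(), returning a bitmask built from enum nvdimm_security_bits so that 0 naturally encodes the error / not-supported case. A hedged sketch of a provider-side implementation; the hardware query is faked:

#include <linux/bits.h>
#include <linux/libnvdimm.h>

static unsigned long example_security_flags(struct nvdimm *nvdimm,
                                            enum nvdimm_passphrase_type ptype)
{
        bool locked = false;    /* would come from a firmware/hardware query */

        return locked ? BIT(NVDIMM_SECURITY_LOCKED)
                      : BIT(NVDIMM_SECURITY_UNLOCKED);
}

static const struct nvdimm_security_ops example_security_ops = {
        .get_flags = example_security_flags,
};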
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index df1318d85f7d..a3763247547c 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -339,6 +339,9 @@
* Check for permission to change root directory.
* @path contains the path structure.
* Return 0 if permission is granted.
+ * @path_notify:
+ * Check permissions before setting a watch on events as defined by @mask,
+ * on an object at @path, whose type is defined by @obj_type.
* @inode_readlink:
* Check the permission to read the symbolic link.
* @dentry contains the dentry structure for the file link.
@@ -1446,6 +1449,11 @@
* @bpf_prog_free_security:
* Clean up the security information stored inside bpf prog.
*
+ * @locked_down
+ * Determine whether a kernel feature that potentially enables arbitrary
+ * code execution in kernel space should be permitted.
+ *
+ * @what: kernel feature being accessed
*/
union security_list_options {
int (*binder_set_context_mgr)(struct task_struct *mgr);
@@ -1535,7 +1543,9 @@ union security_list_options {
int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid);
int (*path_chroot)(const struct path *path);
#endif
-
+ /* Needed for inode based security check */
+ int (*path_notify)(const struct path *path, u64 mask,
+ unsigned int obj_type);
int (*inode_alloc_security)(struct inode *inode);
void (*inode_free_security)(struct inode *inode);
int (*inode_init_security)(struct inode *inode, struct inode *dir,
@@ -1807,6 +1817,7 @@ union security_list_options {
int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux);
void (*bpf_prog_free_security)(struct bpf_prog_aux *aux);
#endif /* CONFIG_BPF_SYSCALL */
+ int (*locked_down)(enum lockdown_reason what);
};
struct security_hook_heads {
@@ -1860,6 +1871,8 @@ struct security_hook_heads {
struct hlist_head path_chown;
struct hlist_head path_chroot;
#endif
+ /* Needed for inode based modules as well */
+ struct hlist_head path_notify;
struct hlist_head inode_alloc_security;
struct hlist_head inode_free_security;
struct hlist_head inode_init_security;
@@ -2046,6 +2059,7 @@ struct security_hook_heads {
struct hlist_head bpf_prog_alloc_security;
struct hlist_head bpf_prog_free_security;
#endif /* CONFIG_BPF_SYSCALL */
+ struct hlist_head locked_down;
} __randomize_layout;
/*
@@ -2104,12 +2118,18 @@ struct lsm_info {
};
extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
+extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
#define DEFINE_LSM(lsm) \
static struct lsm_info __lsm_##lsm \
__used __section(.lsm_info.init) \
__aligned(sizeof(unsigned long))
+#define DEFINE_EARLY_LSM(lsm) \
+ static struct lsm_info __early_lsm_##lsm \
+ __used __section(.early_lsm_info.init) \
+ __aligned(sizeof(unsigned long))
+
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
/*
* Assuring the safety of deleting a security module is up to
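The locked_down hook and DEFINE_EARLY_LSM() added above let a minor LSM gate kernel features that could be used to run arbitrary code, and let it register before the ordinary LSM initialisation pass. A sketch of a trivial early LSM using both; the name and the allow-everything policy are purely illustrative:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lsm_hooks.h>
#include <linux/security.h>

static int example_locked_down(enum lockdown_reason what)
{
        return 0;       /* permit everything in this sketch */
}

static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(locked_down, example_locked_down),
};

static int __init example_lsm_init(void)
{
        security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks), "example");
        return 0;
}

DEFINE_EARLY_LSM(example) = {
        .name = "example",
        .init = example_lsm_init,
};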
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index ccb73422c2fa..e6f54ef6698b 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -20,6 +20,9 @@
#define CMDQ_WFE_WAIT BIT(15)
#define CMDQ_WFE_WAIT_VALUE 0x1
+/** cmdq event maximum */
+#define CMDQ_MAX_EVENT 0x3ff
+
/*
* CMDQ_CODE_MASK:
* set write mask
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 470bd53a89df..5c4a18a91f89 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -18,23 +18,10 @@
#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-#define sme_me_mask 0ULL
-
-static inline bool sme_active(void) { return false; }
-static inline bool sev_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
-static inline bool mem_encrypt_active(void)
-{
- return sme_me_mask;
-}
-
-static inline u64 sme_get_me_mask(void)
-{
- return sme_me_mask;
-}
-
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
* The __sme_set() and __sme_clr() macros are useful for adding or removing
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ad8f1a397ae4..9b60863429cc 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -128,9 +128,8 @@ struct mem_cgroup_per_node {
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
-#ifdef CONFIG_MEMCG_KMEM
struct memcg_shrinker_map __rcu *shrinker_map;
-#endif
+
struct rb_node tree_node; /* RB tree node */
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
@@ -331,6 +330,10 @@ struct mem_cgroup {
struct list_head event_list;
spinlock_t event_list_lock;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct deferred_split deferred_split_queue;
+#endif
+
struct mem_cgroup_per_node *nodeinfo[0];
/* WARNING: nodeinfo must be the last member here */
};
@@ -1311,6 +1314,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
} while ((memcg = parent_mem_cgroup(memcg)));
return false;
}
+
+extern int memcg_expand_shrinker_maps(int new_id);
+
+extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1319,6 +1327,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
return false;
}
+
+static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id)
+{
+}
#endif
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
@@ -1390,10 +1403,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-extern int memcg_expand_shrinker_maps(int new_id);
-
-extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id);
#else
static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
@@ -1435,8 +1444,6 @@ static inline void memcg_put_cache_ids(void)
{
}
-static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id) { }
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 02e633f3ede0..0ebb105eb261 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -25,7 +25,6 @@
struct memory_block {
unsigned long start_section_nr;
- unsigned long end_section_nr;
unsigned long state; /* serialized by the dev->lock */
int section_count; /* serialized by mem_sysfs_mutex */
int online_type; /* for passing data to online routine */
@@ -80,9 +79,9 @@ struct mem_section;
#define IPC_CALLBACK_PRI 10
#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
-static inline int memory_dev_init(void)
+static inline void memory_dev_init(void)
{
- return 0;
+ return;
}
static inline int register_memory_notifier(struct notifier_block *nb)
{
@@ -113,7 +112,7 @@ extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size);
void remove_memory_block_devices(unsigned long start, unsigned long size);
-extern int memory_dev_init(void);
+extern void memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
extern int memory_isolate_notify(unsigned long val, void *v);
extern struct memory_block *find_memory_block(struct mem_section *);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index bac395f1d00a..5228c62af416 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr);
-struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
- unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index f8a5b2a19945..bef51e35d8d2 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -109,10 +109,8 @@ struct dev_pagemap {
struct percpu_ref *ref;
struct percpu_ref internal_ref;
struct completion done;
- struct device *dev;
enum memory_type type;
unsigned int flags;
- u64 pci_p2pdma_bus_offset;
const struct dev_pagemap_ops *ops;
};
@@ -124,6 +122,8 @@ static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
}
#ifdef CONFIG_ZONE_DEVICE
+void *memremap_pages(struct dev_pagemap *pgmap, int nid);
+void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 77805c3f2de7..61c2875c2a40 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -8,183 +8,11 @@
#ifndef __LINUX_MFD_CROS_EC_H
#define __LINUX_MFD_CROS_EC_H
-#include <linux/cdev.h>
#include <linux/device.h>
-#include <linux/notifier.h>
-#include <linux/mfd/cros_ec_commands.h>
-#include <linux/mutex.h>
-
-#define CROS_EC_DEV_NAME "cros_ec"
-#define CROS_EC_DEV_FP_NAME "cros_fp"
-#define CROS_EC_DEV_PD_NAME "cros_pd"
-#define CROS_EC_DEV_TP_NAME "cros_tp"
-#define CROS_EC_DEV_ISH_NAME "cros_ish"
-#define CROS_EC_DEV_SCP_NAME "cros_scp"
-
-/*
- * The EC is unresponsive for a time after a reboot command. Add a
- * simple delay to make sure that the bus stays locked.
- */
-#define EC_REBOOT_DELAY_MS 50
-
-/*
- * Max bus-specific overhead incurred by request/responses.
- * I2C requires 1 additional byte for requests.
- * I2C requires 2 additional bytes for responses.
- * SPI requires up to 32 additional bytes for responses.
- */
-#define EC_PROTO_VERSION_UNKNOWN 0
-#define EC_MAX_REQUEST_OVERHEAD 1
-#define EC_MAX_RESPONSE_OVERHEAD 32
-
-/*
- * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
- */
-enum {
- EC_MSG_TX_HEADER_BYTES = 3,
- EC_MSG_TX_TRAILER_BYTES = 1,
- EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES +
- EC_MSG_TX_TRAILER_BYTES,
- EC_MSG_RX_PROTO_BYTES = 3,
-
- /* Max length of messages for proto 2*/
- EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
- EC_MSG_TX_PROTO_BYTES,
-
- EC_MAX_MSG_BYTES = 64 * 1024,
-};
-
-/**
- * struct cros_ec_command - Information about a ChromeOS EC command.
- * @version: Command version number (often 0).
- * @command: Command to send (EC_CMD_...).
- * @outsize: Outgoing length in bytes.
- * @insize: Max number of bytes to accept from the EC.
- * @result: EC's response to the command (separate from communication failure).
- * @data: Where to put the incoming data from EC and outgoing data to EC.
- */
-struct cros_ec_command {
- uint32_t version;
- uint32_t command;
- uint32_t outsize;
- uint32_t insize;
- uint32_t result;
- uint8_t data[0];
-};
-
-/**
- * struct cros_ec_device - Information about a ChromeOS EC device.
- * @phys_name: Name of physical comms layer (e.g. 'i2c-4').
- * @dev: Device pointer for physical comms device
- * @was_wake_device: True if this device was set to wake the system from
- * sleep at the last suspend.
- * @cros_class: The class structure for this device.
- * @cmd_readmem: Direct read of the EC memory-mapped region, if supported.
- * @offset: Is within EC_LPC_ADDR_MEMMAP region.
- * @bytes: Number of bytes to read. zero means "read a string" (including
- * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be
- * read. Caller must ensure that the buffer is large enough for the
- * result when reading a string.
- * @max_request: Max size of message requested.
- * @max_response: Max size of message response.
- * @max_passthru: Max sice of passthru message.
- * @proto_version: The protocol version used for this device.
- * @priv: Private data.
- * @irq: Interrupt to use.
- * @id: Device id.
- * @din: Input buffer (for data from EC). This buffer will always be
- * dword-aligned and include enough space for up to 7 word-alignment
- * bytes also, so we can ensure that the body of the message is always
- * dword-aligned (64-bit). We use this alignment to keep ARM and x86
- * happy. Probably word alignment would be OK, there might be a small
- * performance advantage to using dword.
- * @dout: Output buffer (for data to EC). This buffer will always be
- * dword-aligned and include enough space for up to 7 word-alignment
- * bytes also, so we can ensure that the body of the message is always
- * dword-aligned (64-bit). We use this alignment to keep ARM and x86
- * happy. Probably word alignment would be OK, there might be a small
- * performance advantage to using dword.
- * @din_size: Size of din buffer to allocate (zero to use static din).
- * @dout_size: Size of dout buffer to allocate (zero to use static dout).
- * @wake_enabled: True if this device can wake the system from sleep.
- * @suspended: True if this device had been suspended.
- * @cmd_xfer: Send command to EC and get response.
- * Returns the number of bytes received if the communication
- * succeeded, but that doesn't mean the EC was happy with the
- * command. The caller should check msg.result for the EC's result
- * code.
- * @pkt_xfer: Send packet to EC and get response.
- * @lock: One transaction at a time.
- * @mkbp_event_supported: True if this EC supports the MKBP event protocol.
- * @host_sleep_v1: True if this EC supports the sleep v1 command.
- * @event_notifier: Interrupt event notifier for transport devices.
- * @event_data: Raw payload transferred with the MKBP event.
- * @event_size: Size in bytes of the event data.
- * @host_event_wake_mask: Mask of host events that cause wake from suspend.
- */
-struct cros_ec_device {
- /* These are used by other drivers that want to talk to the EC */
- const char *phys_name;
- struct device *dev;
- bool was_wake_device;
- struct class *cros_class;
- int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
- unsigned int bytes, void *dest);
-
- /* These are used to implement the platform-specific interface */
- u16 max_request;
- u16 max_response;
- u16 max_passthru;
- u16 proto_version;
- void *priv;
- int irq;
- u8 *din;
- u8 *dout;
- int din_size;
- int dout_size;
- bool wake_enabled;
- bool suspended;
- int (*cmd_xfer)(struct cros_ec_device *ec,
- struct cros_ec_command *msg);
- int (*pkt_xfer)(struct cros_ec_device *ec,
- struct cros_ec_command *msg);
- struct mutex lock;
- bool mkbp_event_supported;
- bool host_sleep_v1;
- struct blocking_notifier_head event_notifier;
-
- struct ec_response_get_next_event_v1 event_data;
- int event_size;
- u32 host_event_wake_mask;
- u32 last_resume_result;
-};
-
-/**
- * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
- * @sensor_num: Id of the sensor, as reported by the EC.
- */
-struct cros_ec_sensor_platform {
- u8 sensor_num;
-};
-
-/**
- * struct cros_ec_platform - ChromeOS EC platform information.
- * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
- * used in /dev/ and sysfs.
- * @cmd_offset: Offset to apply for each command. Set when
- * registering a device behind another one.
- */
-struct cros_ec_platform {
- const char *ec_name;
- u16 cmd_offset;
-};
-
-struct cros_ec_debugfs;
/**
* struct cros_ec_dev - ChromeOS EC device entry point.
* @class_dev: Device structure used in sysfs.
- * @cdev: Character device structure in /dev.
* @ec_dev: cros_ec_device structure to talk to the physical device.
* @dev: Pointer to the platform device.
* @debug_info: cros_ec_debugfs structure for debugging information.
@@ -194,7 +22,6 @@ struct cros_ec_debugfs;
*/
struct cros_ec_dev {
struct device class_dev;
- struct cdev cdev;
struct cros_ec_device *ec_dev;
struct device *dev;
struct cros_ec_debugfs *debug_info;
@@ -205,123 +32,4 @@ struct cros_ec_dev {
#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
-/**
- * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
- * @ec_dev: Device to suspend.
- *
- * This can be called by drivers to handle a suspend event.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_suspend(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
- * @ec_dev: Device to resume.
- *
- * This can be called by drivers to handle a resume event.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_resume(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
- * @ec_dev: Device to register.
- * @msg: Message to write.
- *
- * This is intended to be used by all ChromeOS EC drivers, but at present
- * only SPI uses it. Once LPC uses the same protocol it can start using it.
- * I2C could use it now, with a refactor of the existing code.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_check_result() - Check ec_msg->result.
- * @ec_dev: EC device.
- * @msg: Message to check.
- *
- * This is used by ChromeOS EC drivers to check the ec_msg->result for
- * errors and to warn about them.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_check_result(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
- * @ec_dev: EC device.
- * @msg: Message to write.
- *
- * Call this to send a command to the ChromeOS EC. This should be used
- * instead of calling the EC's cmd_xfer() callback directly.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
- * @ec_dev: EC device.
- * @msg: Message to write.
- *
- * This function is identical to cros_ec_cmd_xfer, except it returns success
- * status only if both the command was transmitted successfully and the EC
- * replied with success status. It's not necessary to check msg->result when
- * using this function.
- *
- * Return: The number of bytes transferred on success or negative error code.
- */
-int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
- * @ec_dev: Device to register.
- *
- * Before calling this, allocate a pointer to a new device and then fill
- * in all the fields up to the --private-- marker.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_register(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_query_all() - Query the protocol version supported by the
- * ChromeOS EC.
- * @ec_dev: Device to register.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_query_all(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
- * @ec_dev: Device to fetch event from.
- * @wake_event: Pointer to a bool set to true upon return if the event might be
- * treated as a wake event. Ignored if null.
- *
- * Return: negative error code on errors; 0 for no data; or else number of
- * bytes received (i.e., an event was retrieved successfully). Event types are
- * written out to @ec_dev->event_data.event_type on success.
- */
-int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event);
-
-/**
- * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC.
- * @ec_dev: Device to fetch event from.
- *
- * When MKBP is supported, when the EC raises an interrupt, we collect the
- * events raised and call the functions in the ec notifier. This function
- * is a helper to know which events are raised.
- *
- * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*.
- */
-u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
-
#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
deleted file mode 100644
index 085edbf7601b..000000000000
--- a/include/linux/mfd/da9063/pdata.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Platform configuration options for DA9063
- *
- * Copyright 2012 Dialog Semiconductor Ltd.
- *
- * Author: Michal Hajduk, Dialog Semiconductor
- * Author: Krystian Garbaciak, Dialog Semiconductor
- */
-
-#ifndef __MFD_DA9063_PDATA_H__
-#define __MFD_DA9063_PDATA_H__
-
-/*
- * RGB LED configuration
- */
-/* LED IDs for flags in struct led_info. */
-enum {
- DA9063_GPIO11_LED,
- DA9063_GPIO14_LED,
- DA9063_GPIO15_LED,
-
- DA9063_LED_NUM
-};
-#define DA9063_LED_ID_MASK 0x3
-
-/* LED polarity for flags in struct led_info. */
-#define DA9063_LED_HIGH_LEVEL_ACTIVE 0x0
-#define DA9063_LED_LOW_LEVEL_ACTIVE 0x4
-
-
-/*
- * General PMIC configuration
- */
-/* HWMON ADC channels configuration */
-#define DA9063_FLG_FORCE_IN0_MANUAL_MODE 0x0010
-#define DA9063_FLG_FORCE_IN0_AUTO_MODE 0x0020
-#define DA9063_FLG_FORCE_IN1_MANUAL_MODE 0x0040
-#define DA9063_FLG_FORCE_IN1_AUTO_MODE 0x0080
-#define DA9063_FLG_FORCE_IN2_MANUAL_MODE 0x0100
-#define DA9063_FLG_FORCE_IN2_AUTO_MODE 0x0200
-#define DA9063_FLG_FORCE_IN3_MANUAL_MODE 0x0400
-#define DA9063_FLG_FORCE_IN3_AUTO_MODE 0x0800
-
-/* Disable register caching. */
-#define DA9063_FLG_NO_CACHE 0x0008
-
-struct da9063;
-
-/* DA9063 platform data */
-struct da9063_pdata {
- int (*init)(struct da9063 *da9063);
- int irq_base;
- bool key_power;
- unsigned flags;
- struct da9063_regulators_pdata *regulators_pdata;
- struct led_platform_data *leds_pdata;
-};
-
-#endif /* __MFD_DA9063_PDATA_H__ */
diff --git a/include/linux/mfd/intel_soc_pmic_mrfld.h b/include/linux/mfd/intel_soc_pmic_mrfld.h
new file mode 100644
index 000000000000..4daecd682275
--- /dev/null
+++ b/include/linux/mfd/intel_soc_pmic_mrfld.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for Intel Merrifield Basin Cove PMIC
+ *
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_SOC_PMIC_MRFLD_H__
+#define __INTEL_SOC_PMIC_MRFLD_H__
+
+#include <linux/bits.h>
+
+#define BCOVE_ID 0x00
+
+#define BCOVE_ID_MINREV0 GENMASK(2, 0)
+#define BCOVE_ID_MAJREV0 GENMASK(5, 3)
+#define BCOVE_ID_VENDID0 GENMASK(7, 6)
+
+#define BCOVE_MINOR(x) (unsigned int)(((x) & BCOVE_ID_MINREV0) >> 0)
+#define BCOVE_MAJOR(x) (unsigned int)(((x) & BCOVE_ID_MAJREV0) >> 3)
+#define BCOVE_VENDOR(x) (unsigned int)(((x) & BCOVE_ID_VENDID0) >> 6)
+
+#define BCOVE_IRQLVL1 0x01
+
+#define BCOVE_PBIRQ 0x02
+#define BCOVE_TMUIRQ 0x03
+#define BCOVE_THRMIRQ 0x04
+#define BCOVE_BCUIRQ 0x05
+#define BCOVE_ADCIRQ 0x06
+#define BCOVE_CHGRIRQ0 0x07
+#define BCOVE_CHGRIRQ1 0x08
+#define BCOVE_GPIOIRQ 0x09
+#define BCOVE_CRITIRQ 0x0B
+
+#define BCOVE_MIRQLVL1 0x0C
+
+#define BCOVE_MPBIRQ 0x0D
+#define BCOVE_MTMUIRQ 0x0E
+#define BCOVE_MTHRMIRQ 0x0F
+#define BCOVE_MBCUIRQ 0x10
+#define BCOVE_MADCIRQ 0x11
+#define BCOVE_MCHGRIRQ0 0x12
+#define BCOVE_MCHGRIRQ1 0x13
+#define BCOVE_MGPIOIRQ 0x14
+#define BCOVE_MCRITIRQ 0x16
+
+#define BCOVE_SCHGRIRQ0 0x4E
+#define BCOVE_SCHGRIRQ1 0x4F
+
+/* Level 1 IRQs */
+#define BCOVE_LVL1_PWRBTN BIT(0) /* power button */
+#define BCOVE_LVL1_TMU BIT(1) /* time management unit */
+#define BCOVE_LVL1_THRM BIT(2) /* thermal */
+#define BCOVE_LVL1_BCU BIT(3) /* burst control unit */
+#define BCOVE_LVL1_ADC BIT(4) /* ADC */
+#define BCOVE_LVL1_CHGR BIT(5) /* charger */
+#define BCOVE_LVL1_GPIO BIT(6) /* GPIO */
+#define BCOVE_LVL1_CRIT BIT(7) /* critical event */
+
+/* Level 2 IRQs: power button */
+#define BCOVE_PBIRQ_PBTN BIT(0)
+#define BCOVE_PBIRQ_UBTN BIT(1)
+
+/* Level 2 IRQs: ADC */
+#define BCOVE_ADCIRQ_BATTEMP BIT(2)
+#define BCOVE_ADCIRQ_SYSTEMP BIT(3)
+#define BCOVE_ADCIRQ_BATTID BIT(4)
+#define BCOVE_ADCIRQ_VIBATT BIT(5)
+#define BCOVE_ADCIRQ_CCTICK BIT(7)
+
+/* Level 2 IRQs: charger */
+#define BCOVE_CHGRIRQ_BAT0ALRT BIT(4)
+#define BCOVE_CHGRIRQ_BAT1ALRT BIT(5)
+#define BCOVE_CHGRIRQ_BATCRIT BIT(6)
+
+#define BCOVE_CHGRIRQ_VBUSDET BIT(0)
+#define BCOVE_CHGRIRQ_DCDET BIT(1)
+#define BCOVE_CHGRIRQ_BATTDET BIT(2)
+#define BCOVE_CHGRIRQ_USBIDDET BIT(3)
+
+#endif /* __INTEL_SOC_PMIC_MRFLD_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index 25a95e72179b..fc88d315bdde 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -7,6 +7,14 @@
#ifndef __MFD_MT6397_CORE_H__
#define __MFD_MT6397_CORE_H__
+#include <linux/mutex.h>
+
+enum chip_id {
+ MT6323_CHIP_ID = 0x23,
+ MT6391_CHIP_ID = 0x91,
+ MT6397_CHIP_ID = 0x97,
+};
+
enum mt6397_irq_numbers {
MT6397_IRQ_SPKL_AB = 0,
MT6397_IRQ_SPKR_AB,
@@ -54,6 +62,9 @@ struct mt6397_chip {
u16 irq_masks_cache[2];
u16 int_con[2];
u16 int_status[2];
+ u16 chip_id;
};
+int mt6397_irq_init(struct mt6397_chip *chip);
+
#endif /* __MFD_MT6397_CORE_H__ */
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 8cfda0554381..112dc66262cc 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -17,12 +17,18 @@
struct device_node;
#ifdef CONFIG_MFD_SYSCON
+extern struct regmap *device_node_to_regmap(struct device_node *np);
extern struct regmap *syscon_node_to_regmap(struct device_node *np);
extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
extern struct regmap *syscon_regmap_lookup_by_phandle(
struct device_node *np,
const char *property);
#else
+static inline struct regmap *device_node_to_regmap(struct device_node *np)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
{
return ERR_PTR(-ENOTSUPP);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 7f04754c7f2b..72120061b7d4 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -166,8 +166,6 @@ static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
#define MIGRATE_PFN_MIGRATE (1UL << 1)
#define MIGRATE_PFN_LOCKED (1UL << 2)
#define MIGRATE_PFN_WRITE (1UL << 3)
-#define MIGRATE_PFN_DEVICE (1UL << 4)
-#define MIGRATE_PFN_ERROR (1UL << 5)
#define MIGRATE_PFN_SHIFT 6
static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
@@ -182,107 +180,27 @@ static inline unsigned long migrate_pfn(unsigned long pfn)
return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
-/*
- * struct migrate_vma_ops - migrate operation callback
- *
- * @alloc_and_copy: alloc destination memory and copy source memory to it
- * @finalize_and_map: allow caller to map the successfully migrated pages
- *
- *
- * The alloc_and_copy() callback happens once all source pages have been locked,
- * unmapped and checked (checked whether pinned or not). All pages that can be
- * migrated will have an entry in the src array set with the pfn value of the
- * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set (other
- * flags might be set but should be ignored by the callback).
- *
- * The alloc_and_copy() callback can then allocate destination memory and copy
- * source memory to it for all those entries (ie with MIGRATE_PFN_VALID and
- * MIGRATE_PFN_MIGRATE flag set). Once these are allocated and copied, the
- * callback must update each corresponding entry in the dst array with the pfn
- * value of the destination page and with the MIGRATE_PFN_VALID and
- * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages
- * locked, via lock_page()).
- *
- * At this point the alloc_and_copy() callback is done and returns.
- *
- * Note that the callback does not have to migrate all the pages that are
- * marked with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration
- * from device memory to system memory (ie the MIGRATE_PFN_DEVICE flag is also
- * set in the src array entry). If the device driver cannot migrate a device
- * page back to system memory, then it must set the corresponding dst array
- * entry to MIGRATE_PFN_ERROR. This will trigger a SIGBUS if CPU tries to
- * access any of the virtual addresses originally backed by this page. Because
- * a SIGBUS is such a severe result for the userspace process, the device
- * driver should avoid setting MIGRATE_PFN_ERROR unless it is really in an
- * unrecoverable state.
- *
- * For empty entry inside CPU page table (pte_none() or pmd_none() is true) we
- * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
- * allowing device driver to allocate device memory for those unback virtual
- * address. For this the device driver simply have to allocate device memory
- * and properly set the destination entry like for regular migration. Note that
- * this can still fails and thus inside the device driver must check if the
- * migration was successful for those entry inside the finalize_and_map()
- * callback just like for regular migration.
- *
- * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
- * OR BAD THINGS WILL HAPPEN !
- *
- *
- * The finalize_and_map() callback happens after struct page migration from
- * source to destination (destination struct pages are the struct pages for the
- * memory allocated by the alloc_and_copy() callback). Migration can fail, and
- * thus the finalize_and_map() allows the driver to inspect which pages were
- * successfully migrated, and which were not. Successfully migrated pages will
- * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
- *
- * It is safe to update device page table from within the finalize_and_map()
- * callback because both destination and source page are still locked, and the
- * mmap_sem is held in read mode (hence no one can unmap the range being
- * migrated).
- *
- * Once callback is done cleaning up things and updating its page table (if it
- * chose to do so, this is not an obligation) then it returns. At this point,
- * the HMM core will finish up the final steps, and the migration is complete.
- *
- * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY
- * ENTRIES OR BAD THINGS WILL HAPPEN !
- */
-struct migrate_vma_ops {
- void (*alloc_and_copy)(struct vm_area_struct *vma,
- const unsigned long *src,
- unsigned long *dst,
- unsigned long start,
- unsigned long end,
- void *private);
- void (*finalize_and_map)(struct vm_area_struct *vma,
- const unsigned long *src,
- const unsigned long *dst,
- unsigned long start,
- unsigned long end,
- void *private);
+struct migrate_vma {
+ struct vm_area_struct *vma;
+ /*
+	 * Both the src and dst arrays must be big enough for
+	 * (end - start) >> PAGE_SHIFT entries.
+	 *
+	 * The src array must not be modified by the caller after
+	 * migrate_vma_setup(), and the dst array must not be modified
+	 * after migrate_vma_pages() returns.
+ */
+ unsigned long *dst;
+ unsigned long *src;
+ unsigned long cpages;
+ unsigned long npages;
+ unsigned long start;
+ unsigned long end;
};
-#if defined(CONFIG_MIGRATE_VMA_HELPER)
-int migrate_vma(const struct migrate_vma_ops *ops,
- struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- unsigned long *src,
- unsigned long *dst,
- void *private);
-#else
-static inline int migrate_vma(const struct migrate_vma_ops *ops,
- struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- unsigned long *src,
- unsigned long *dst,
- void *private)
-{
- return -EINVAL;
-}
-#endif /* IS_ENABLED(CONFIG_MIGRATE_VMA_HELPER) */
+int migrate_vma_setup(struct migrate_vma *args);
+void migrate_vma_pages(struct migrate_vma *migrate);
+void migrate_vma_finalize(struct migrate_vma *migrate);
#endif /* CONFIG_MIGRATION */
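Sketch of the new three-step flow that replaces migrate_vma() and its callback ops (illustration only, not part of the patch). It assumes the caller holds mmap_sem, that src/dst were sized for (end - start) >> PAGE_SHIFT entries, and that filling in the dst entries is driver-specific work elided here.

static int example_migrate_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 unsigned long *src, unsigned long *dst)
{
	struct migrate_vma args = {
		.vma	= vma,
		.src	= src,
		.dst	= dst,
		.start	= start,
		.end	= end,
	};
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	if (!args.cpages)	/* nothing could be collected for migration */
		return 0;

	/* Driver allocates destination pages and fills args.dst here. */

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}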
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index f3773e8536bb..cc1c230f10ee 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -328,6 +328,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
+ MLX5_EVENT_TYPE_XRQ_ERROR = 0x18,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
@@ -345,6 +346,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+ MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
@@ -584,6 +586,12 @@ struct mlx5_eqe_cq_err {
u8 syndrome;
};
+struct mlx5_eqe_xrq_err {
+ __be32 reserved1[5];
+ __be32 type_xrqn;
+ __be32 reserved2;
+};
+
struct mlx5_eqe_port_state {
u8 reserved0[8];
u8 port;
@@ -698,6 +706,7 @@ union ev_data {
struct mlx5_eqe_pps pps;
struct mlx5_eqe_dct dct;
struct mlx5_eqe_temp_warning temp_warning;
+ struct mlx5_eqe_xrq_err xrq_err;
} __packed;
struct mlx5_eqe {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0334ca97c584..cc292273e6ba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -805,6 +805,24 @@ static inline void set_compound_order(struct page *page, unsigned int order)
page[1].compound_order = order;
}
+/* Returns the number of pages in this potentially compound page. */
+static inline unsigned long compound_nr(struct page *page)
+{
+ return 1UL << compound_order(page);
+}
+
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+ return PAGE_SIZE << compound_order(page);
+}
+
+/* Returns the number of bits needed for the number of bytes in a page */
+static inline unsigned int page_shift(struct page *page)
+{
+ return PAGE_SHIFT + compound_order(page);
+}
+
void free_compound_page(struct page *page);
#ifdef CONFIG_MMU
@@ -1057,8 +1075,9 @@ static inline void put_user_page(struct page *page)
put_page(page);
}
-void put_user_pages_dirty(struct page **pages, unsigned long npages);
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+ bool make_dirty);
+
void put_user_pages(struct page **pages, unsigned long npages);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -1405,7 +1424,11 @@ extern void pagefault_out_of_memory(void);
extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
+#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
+#else
+static inline bool can_do_mlock(void) { return false; }
+#endif
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);
@@ -1430,54 +1453,8 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long start, unsigned long end);
-/**
- * mm_walk - callbacks for walk_page_range
- * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
- * this handler should only handle pud_trans_huge() puds.
- * the pmd_entry or pte_entry callbacks will be used for
- * regular PUDs.
- * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
- * this handler is required to be able to handle
- * pmd_trans_huge() pmds. They may simply choose to
- * split_huge_page() instead of handling it explicitly.
- * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
- * @pte_hole: if set, called for each hole at all levels
- * @hugetlb_entry: if set, called for each hugetlb entry
- * @test_walk: caller specific callback function to determine whether
- * we walk over the current vma or not. Returning 0
- * value means "do page table walk over the current vma,"
- * and a negative one means "abort current page table walk
- * right now." 1 means "skip the current vma."
- * @mm: mm_struct representing the target process of page table walk
- * @vma: vma currently walked (NULL if walking outside vmas)
- * @private: private data for callbacks' usage
- *
- * (see the comment on walk_page_range() for more details)
- */
-struct mm_walk {
- int (*pud_entry)(pud_t *pud, unsigned long addr,
- unsigned long next, struct mm_walk *walk);
- int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
- unsigned long next, struct mm_walk *walk);
- int (*pte_entry)(pte_t *pte, unsigned long addr,
- unsigned long next, struct mm_walk *walk);
- int (*pte_hole)(unsigned long addr, unsigned long next,
- struct mm_walk *walk);
- int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
- unsigned long addr, unsigned long next,
- struct mm_walk *walk);
- int (*test_walk)(unsigned long addr, unsigned long next,
- struct mm_walk *walk);
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- void *private;
-};
-
struct mmu_notifier_range;
-int walk_page_range(unsigned long addr, unsigned long end,
- struct mm_walk *walk);
-int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
@@ -1972,7 +1949,7 @@ static inline void pgtable_init(void)
pgtable_cache_init();
}
-static inline bool pgtable_page_ctor(struct page *page)
+static inline bool pgtable_pte_page_ctor(struct page *page)
{
if (!ptlock_init(page))
return false;
@@ -1981,7 +1958,7 @@ static inline bool pgtable_page_ctor(struct page *page)
return true;
}
-static inline void pgtable_page_dtor(struct page *page)
+static inline void pgtable_pte_page_dtor(struct page *page)
{
ptlock_free(page);
__ClearPageTable(page);
@@ -2351,6 +2328,8 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
+unsigned long randomize_stack_top(unsigned long stack_top);
+
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2614,6 +2593,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_COW 0x4000 /* internal GUP flag */
#define FOLL_ANON 0x8000 /* don't do file mappings */
#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
+#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
/*
* NOTE on FOLL_LONGTERM:
@@ -2891,5 +2871,12 @@ void __init setup_nr_node_ids(void);
static inline void setup_nr_node_ids(void) {}
#endif
+extern int memcmp_pages(struct page *page1, struct page *page2);
+
+static inline int pages_identical(struct page *page1, struct page *page2)
+{
+ return !memcmp_pages(page1, page2);
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
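The three new page helpers above are thin wrappers around compound_order(); a tiny sketch (not from the patch) showing how they relate, with pr_debug() assumed available via the usual printk header.

static void example_dump_page_geometry(struct page *page)
{
	/* page_size() == compound_nr() * PAGE_SIZE == 1UL << page_shift() */
	pr_debug("pages=%lu bytes=%lu shift=%u\n",
		 compound_nr(page), page_size(page), page_shift(page));
}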
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6a7a1083b6fb..2222fa795284 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -25,7 +25,6 @@
struct address_space;
struct mem_cgroup;
-struct hmm;
/*
* Each physical page in the system has a struct page associated with
@@ -139,6 +138,7 @@ struct page {
struct { /* Second tail page of compound page */
unsigned long _compound_pad_1; /* compound_head */
unsigned long _compound_pad_2;
+ /* For both global and memcg */
struct list_head deferred_list;
};
struct { /* Page table pages */
@@ -383,6 +383,16 @@ struct mm_struct {
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
+#ifdef CONFIG_MEMBARRIER
+ /**
+ * @membarrier_state: Flags controlling membarrier behavior.
+ *
+ * This field is close to @pgd to hopefully fit in the same
+ * cache-line, which needs to be touched by switch_mm().
+ */
+ atomic_t membarrier_state;
+#endif
+
/**
* @mm_users: The number of users including userspace.
*
@@ -452,9 +462,7 @@ struct mm_struct {
unsigned long flags; /* Must use atomic bitops to access */
struct core_state *core_state; /* coredumping support */
-#ifdef CONFIG_MEMBARRIER
- atomic_t membarrier_state;
-#endif
+
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
struct kioctx_table __rcu *ioctx_table;
@@ -511,11 +519,6 @@ struct mm_struct {
atomic_long_t hugetlb_usage;
#endif
struct work_struct async_put_work;
-
-#ifdef CONFIG_HMM_MIRROR
- /* HMM needs to track a few things per mm */
- struct hmm *hmm;
-#endif
} __randomize_layout;
/*
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index d7016dcb245e..c1bc6731125c 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -36,6 +36,10 @@ struct vmacache {
struct vm_area_struct *vmas[VMACACHE_SIZE];
};
+/*
+ * When updating this, please also update struct resident_page_types[] in
+ * kernel/fork.c
+ */
enum {
MM_FILEPAGES, /* Resident file mapping pages */
MM_ANONPAGES, /* Resident anonymous pages */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4704b77259ee..ba703384bea0 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -368,6 +368,7 @@ struct mmc_host {
#define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */
#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */
#define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */
+#define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */
int fixed_drv_type; /* fixed driver type for non-removable media */
@@ -397,6 +398,7 @@ struct mmc_host {
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
unsigned int use_blk_mq:1; /* use blk-mq */
unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
+ unsigned int can_dma_map_merge:1; /* merging can be used */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index b6c004bd9f6a..1bd8e6a09a3c 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -42,6 +42,10 @@ enum mmu_notifier_event {
#ifdef CONFIG_MMU_NOTIFIER
+#ifdef CONFIG_LOCKDEP
+extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
+#endif
+
/*
* The mmu notifier_mm structure is allocated and installed in
* mm->mmu_notifier_mm inside the mm_take_all_locks() protected
@@ -211,6 +215,19 @@ struct mmu_notifier_ops {
*/
void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
unsigned long start, unsigned long end);
+
+ /*
+ * These callbacks are used with the get/put interface to manage the
+ * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
+ * notifier for use with the mm.
+ *
+ * free_notifier() is only called after the mmu_notifier has been
+ * fully put, calls to any ops callback are prevented and no ops
+ * callbacks are currently running. It is called from a SRCU callback
+ * and cannot sleep.
+ */
+ struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
+ void (*free_notifier)(struct mmu_notifier *mn);
};
/*
@@ -227,6 +244,9 @@ struct mmu_notifier_ops {
struct mmu_notifier {
struct hlist_node hlist;
const struct mmu_notifier_ops *ops;
+ struct mm_struct *mm;
+ struct rcu_head rcu;
+ unsigned int users;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -234,14 +254,27 @@ static inline int mm_has_notifiers(struct mm_struct *mm)
return unlikely(mm->mmu_notifier_mm);
}
+struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
+ struct mm_struct *mm);
+static inline struct mmu_notifier *
+mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
+{
+ struct mmu_notifier *ret;
+
+ down_write(&mm->mmap_sem);
+ ret = mmu_notifier_get_locked(ops, mm);
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+void mmu_notifier_put(struct mmu_notifier *mn);
+void mmu_notifier_synchronize(void);
+
extern int mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
struct mm_struct *mm);
-extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
- struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
@@ -310,25 +343,36 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
+ might_sleep();
+
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
if (mm_has_notifiers(range->mm)) {
range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
__mmu_notifier_invalidate_range_start(range);
}
+ lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
+ int ret = 0;
+
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
if (mm_has_notifiers(range->mm)) {
range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
- return __mmu_notifier_invalidate_range_start(range);
+ ret = __mmu_notifier_invalidate_range_start(range);
}
- return 0;
+ lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+ return ret;
}
static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
+ if (mmu_notifier_range_blockable(range))
+ might_sleep();
+
if (mm_has_notifiers(range->mm))
__mmu_notifier_invalidate_range_end(range, false);
}
@@ -482,9 +526,6 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
set_pte_at(___mm, ___address, __ptep, ___pte); \
})
-extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
- void (*func)(struct rcu_head *rcu));
-
#else /* CONFIG_MMU_NOTIFIER */
struct mmu_notifier_range {
@@ -581,6 +622,10 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at
+static inline void mmu_notifier_synchronize(void)
+{
+}
+
#endif /* CONFIG_MMU_NOTIFIER */
#endif /* _LINUX_MMU_NOTIFIER_H */
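A rough sketch of the new get/put lifetime model introduced above (illustration only): one notifier per mm, created by ->alloc_notifier() and released from ->free_notifier() after the final mmu_notifier_put(). struct example_ctx and the example_* helpers are hypothetical; kzalloc(), kfree() and container_of() are assumed from the usual headers.

struct example_ctx {
	struct mmu_notifier mn;		/* embedded; freed in free_notifier() */
	/* driver-private state would live here */
};

static struct mmu_notifier *example_alloc_notifier(struct mm_struct *mm)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	return ctx ? &ctx->mn : ERR_PTR(-ENOMEM);
}

static void example_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct example_ctx, mn));
}

static const struct mmu_notifier_ops example_ops = {
	.alloc_notifier	= example_alloc_notifier,
	.free_notifier	= example_free_notifier,
};

static struct example_ctx *example_get_ctx(struct mm_struct *mm)
{
	/* Takes mmap_sem internally; returns the per-mm notifier, refcounted. */
	struct mmu_notifier *mn = mmu_notifier_get(&example_ops, mm);

	if (IS_ERR(mn))
		return ERR_CAST(mn);
	return container_of(mn, struct example_ctx, mn);
}
/* Each successful example_get_ctx() is balanced by mmu_notifier_put(&ctx->mn). */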
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3f38c30d2f13..bda20282746b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -235,6 +235,8 @@ enum node_stat_item {
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
+ NR_FILE_THPS,
+ NR_FILE_PMDMAPPED,
NR_ANON_THPS,
NR_UNSTABLE_NFS, /* NFS unstable pages */
NR_VMSCAN_WRITE,
@@ -677,6 +679,14 @@ struct zonelist {
extern struct page *mem_map;
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct deferred_split {
+ spinlock_t split_queue_lock;
+ struct list_head split_queue;
+ unsigned long split_queue_len;
+};
+#endif
+
/*
* On NUMA machines, each NUMA node would have a pg_data_t to describe
* it's memory layout. On UMA machines there is a single pglist_data which
@@ -756,9 +766,7 @@ typedef struct pglist_data {
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- spinlock_t split_queue_lock;
- struct list_head split_queue;
- unsigned long split_queue_len;
+ struct deferred_split deferred_split_queue;
#endif
/* Fields commonly accessed by the page reclaim scanner */
diff --git a/include/linux/module.h b/include/linux/module.h
index 1455812dd325..6d20895e7739 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -26,9 +26,6 @@
#include <linux/percpu.h>
#include <asm/module.h>
-/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
-#define MODULE_SIG_STRING "~Module signature appended~\n"
-
/* Not Yet Implemented */
#define MODULE_SUPPORTED_DEVICE(name)
@@ -276,6 +273,8 @@ extern typeof(name) __mod_##type##__##name##_device_table \
* files require multiple MODULE_FIRMWARE() specifiers */
#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
+#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, #ns)
+
struct notifier_block;
#ifdef CONFIG_MODULES
diff --git a/include/linux/module_signature.h b/include/linux/module_signature.h
new file mode 100644
index 000000000000..7eb4b00381ac
--- /dev/null
+++ b/include/linux/module_signature.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Module signature handling.
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_MODULE_SIGNATURE_H
+#define _LINUX_MODULE_SIGNATURE_H
+
+#include <linux/types.h>
+
+/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
+#define MODULE_SIG_STRING "~Module signature appended~\n"
+
+enum pkey_id_type {
+ PKEY_ID_PGP, /* OpenPGP generated key ID */
+ PKEY_ID_X509, /* X.509 arbitrary subjectKeyIdentifier */
+ PKEY_ID_PKCS7, /* Signature in PKCS#7 message */
+};
+
+/*
+ * Module signature information block.
+ *
+ * The constituents of the signature section are, in order:
+ *
+ * - Signer's name
+ * - Key identifier
+ * - Signature data
+ * - Information block
+ */
+struct module_signature {
+ u8 algo; /* Public-key crypto algorithm [0] */
+ u8 hash; /* Digest algorithm [0] */
+ u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */
+ u8 signer_len; /* Length of signer's name [0] */
+ u8 key_id_len; /* Length of key identifier [0] */
+ u8 __pad[3];
+ __be32 sig_len; /* Length of signature data */
+};
+
+int mod_check_sig(const struct module_signature *ms, size_t file_len,
+ const char *name);
+
+#endif /* _LINUX_MODULE_SIGNATURE_H */
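Rough sketch (not part of the patch) of how a verifier might locate the trailing descriptor this header describes: the image ends with struct module_signature followed by MODULE_SIG_STRING. The buffer handling and the "example image" name are assumptions; mod_check_sig() does the real sanity checking.

static int example_find_signature(const u8 *buf, size_t len)
{
	const size_t marker_len = sizeof(MODULE_SIG_STRING) - 1;
	const struct module_signature *ms;

	if (len < marker_len + sizeof(*ms))
		return -ENOENT;
	if (memcmp(buf + len - marker_len, MODULE_SIG_STRING, marker_len))
		return -ENOENT;	/* no signature appended */

	len -= marker_len;
	ms = (const void *)(buf + len - sizeof(*ms));

	/* Validates id_type, the pad bytes and that sig_len fits in the file. */
	return mod_check_sig(ms, len, "example image");
}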
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 4ca8c1c845fb..249e8d9bfbcd 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -189,6 +189,9 @@ struct module; /* only needed for owner field in mtd_info */
*/
struct mtd_debug_info {
struct dentry *dfs_dir;
+
+ const char *partname;
+ const char *partid;
};
struct mtd_info {
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index cebc38b6d6f5..0c7483843a32 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -346,7 +346,7 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
}
/**
- * nanddev_neraseblocks() - Get the total number of erasablocks
+ * nanddev_neraseblocks() - Get the total number of eraseblocks
* @nand: NAND device
*
* Return: the total number of eraseblocks exposed by @nand.
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
index 01306ebe266d..d2c3cf29e0d1 100644
--- a/include/linux/mtd/sharpsl.h
+++ b/include/linux/mtd/sharpsl.h
@@ -5,6 +5,9 @@
* Copyright (C) 2008 Dmitry Baryshkov
*/
+#ifndef _MTD_SHARPSL_H
+#define _MTD_SHARPSL_H
+
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
@@ -16,3 +19,5 @@ struct sharpsl_nand_platform_data {
unsigned int nr_partitions;
const char *const *part_parsers;
};
+
+#endif /* _MTD_SHARPSL_H */
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 9f57cdfcc93d..fc0b4b19c900 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -9,6 +9,7 @@
#include <linux/bitops.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
+#include <linux/spi/spi-mem.h>
/*
* Manufacturer IDs
@@ -224,7 +225,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
return spi_nor_get_protocol_data_nbits(proto);
}
-#define SPI_NOR_MAX_CMD_SIZE 8
enum spi_nor_ops {
SPI_NOR_OPS_READ = 0,
SPI_NOR_OPS_WRITE,
@@ -237,12 +237,12 @@ enum spi_nor_option_flags {
SNOR_F_USE_FSR = BIT(0),
SNOR_F_HAS_SR_TB = BIT(1),
SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
- SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
- SNOR_F_READY_XSR_RDY = BIT(4),
- SNOR_F_USE_CLSR = BIT(5),
- SNOR_F_BROKEN_RESET = BIT(6),
- SNOR_F_4B_OPCODES = BIT(7),
- SNOR_F_HAS_4BAIT = BIT(8),
+ SNOR_F_READY_XSR_RDY = BIT(3),
+ SNOR_F_USE_CLSR = BIT(4),
+ SNOR_F_BROKEN_RESET = BIT(5),
+ SNOR_F_4B_OPCODES = BIT(6),
+ SNOR_F_HAS_4BAIT = BIT(7),
+ SNOR_F_HAS_LOCK = BIT(8),
};
/**
@@ -334,6 +334,195 @@ struct spi_nor_erase_map {
};
/**
+ * struct spi_nor_hwcaps - Structure for describing the hardware capabilities
+ * supported by the SPI controller (bus master).
+ * @mask:		the bitmask listing all the supported hw capabilities
+ */
+struct spi_nor_hwcaps {
+ u32 mask;
+};
+
+/*
+ * (Fast) Read capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * As a matter of performance, it is preferable to use Octal SPI protocols first,
+ * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
+ * (Slow) Read.
+ */
+#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0)
+#define SNOR_HWCAPS_READ BIT(0)
+#define SNOR_HWCAPS_READ_FAST BIT(1)
+#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2)
+
+#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3)
+#define SNOR_HWCAPS_READ_1_1_2 BIT(3)
+#define SNOR_HWCAPS_READ_1_2_2 BIT(4)
+#define SNOR_HWCAPS_READ_2_2_2 BIT(5)
+#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6)
+
+#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7)
+#define SNOR_HWCAPS_READ_1_1_4 BIT(7)
+#define SNOR_HWCAPS_READ_1_4_4 BIT(8)
+#define SNOR_HWCAPS_READ_4_4_4 BIT(9)
+#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10)
+
+#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11)
+#define SNOR_HWCAPS_READ_1_1_8 BIT(11)
+#define SNOR_HWCAPS_READ_1_8_8 BIT(12)
+#define SNOR_HWCAPS_READ_8_8_8 BIT(13)
+#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14)
+
+/*
+ * Page Program capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the
+ * legacy SPI 1-1-1 protocol.
+ * Note that Dual Page Programs are not supported because there is no existing
+ * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
+ * implements such commands.
+ */
+#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16)
+#define SNOR_HWCAPS_PP BIT(16)
+
+#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
+#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
+#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
+#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
+
+#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20)
+#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
+#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
+#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+
+#define SNOR_HWCAPS_X_X_X (SNOR_HWCAPS_READ_2_2_2 | \
+ SNOR_HWCAPS_READ_4_4_4 | \
+ SNOR_HWCAPS_READ_8_8_8 | \
+ SNOR_HWCAPS_PP_4_4_4 | \
+ SNOR_HWCAPS_PP_8_8_8)
+
+#define SNOR_HWCAPS_DTR (SNOR_HWCAPS_READ_1_1_1_DTR | \
+ SNOR_HWCAPS_READ_1_2_2_DTR | \
+ SNOR_HWCAPS_READ_1_4_4_DTR | \
+ SNOR_HWCAPS_READ_1_8_8_DTR)
+
+#define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \
+ SNOR_HWCAPS_PP_MASK)
+
+struct spi_nor_read_command {
+ u8 num_mode_clocks;
+ u8 num_wait_states;
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+struct spi_nor_pp_command {
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+enum spi_nor_read_command_index {
+ SNOR_CMD_READ,
+ SNOR_CMD_READ_FAST,
+ SNOR_CMD_READ_1_1_1_DTR,
+
+ /* Dual SPI */
+ SNOR_CMD_READ_1_1_2,
+ SNOR_CMD_READ_1_2_2,
+ SNOR_CMD_READ_2_2_2,
+ SNOR_CMD_READ_1_2_2_DTR,
+
+ /* Quad SPI */
+ SNOR_CMD_READ_1_1_4,
+ SNOR_CMD_READ_1_4_4,
+ SNOR_CMD_READ_4_4_4,
+ SNOR_CMD_READ_1_4_4_DTR,
+
+ /* Octal SPI */
+ SNOR_CMD_READ_1_1_8,
+ SNOR_CMD_READ_1_8_8,
+ SNOR_CMD_READ_8_8_8,
+ SNOR_CMD_READ_1_8_8_DTR,
+
+ SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+ SNOR_CMD_PP,
+
+ /* Quad SPI */
+ SNOR_CMD_PP_1_1_4,
+ SNOR_CMD_PP_1_4_4,
+ SNOR_CMD_PP_4_4_4,
+
+ /* Octal SPI */
+ SNOR_CMD_PP_1_1_8,
+ SNOR_CMD_PP_1_8_8,
+ SNOR_CMD_PP_8_8_8,
+
+ SNOR_CMD_PP_MAX
+};
+
+/* Forward declaration that will be used in 'struct spi_nor_flash_parameter' */
+struct spi_nor;
+
+/**
+ * struct spi_nor_locking_ops - SPI NOR locking methods
+ * @lock: lock a region of the SPI NOR.
+ * @unlock: unlock a region of the SPI NOR.
+ * @is_locked: check if a region of the SPI NOR is completely locked
+ */
+struct spi_nor_locking_ops {
+ int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+};
+
+/**
+ * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings.
+ * Includes legacy flash parameters and settings that can be overwritten
+ * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216
+ * Serial Flash Discoverable Parameters (SFDP) tables.
+ *
+ * @size: the flash memory density in bytes.
+ * @page_size: the page size of the SPI NOR flash memory.
+ * @hwcaps: describes the read and page program hardware
+ * capabilities.
+ * @reads: read capabilities ordered by priority: the higher index
+ * in the array, the higher priority.
+ * @page_programs: page program capabilities ordered by priority: the
+ * higher index in the array, the higher priority.
+ * @erase_map: the erase map parsed from the SFDP Sector Map Parameter
+ * Table.
+ * @quad_enable: enables SPI NOR quad mode.
+ * @set_4byte: puts the SPI NOR in 4 byte addressing mode.
+ * @convert_addr: converts an absolute address into something the flash
+ * will understand. Particularly useful when pagesize is
+ * not a power-of-2.
+ * @setup: configures the SPI NOR memory. Useful for SPI NOR
+ * flashes that have peculiarities to the SPI NOR standard
+ * e.g. different opcodes, specific address calculation,
+ * page size, etc.
+ * @locking_ops: SPI NOR locking methods.
+ */
+struct spi_nor_flash_parameter {
+ u64 size;
+ u32 page_size;
+
+ struct spi_nor_hwcaps hwcaps;
+ struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
+ struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
+
+ struct spi_nor_erase_map erase_map;
+
+ int (*quad_enable)(struct spi_nor *nor);
+ int (*set_4byte)(struct spi_nor *nor, bool enable);
+ u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
+ int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
+
+ const struct spi_nor_locking_ops *locking_ops;
+};
+
+/**
* struct flash_info - Forward declaration of a structure used internally by
* spi_nor_scan()
*/
@@ -344,6 +533,10 @@ struct flash_info;
* @mtd: point to a mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
* @dev: point to a spi device, or a spi nor controller device.
+ * @spimem:		pointer to the spi-mem device
+ * @bouncebuf: bounce buffer used when the buffer passed by the MTD
+ * layer is not DMA-able
+ * @bouncebuf_size: size of the bounce buffer
* @info: spi-nor part JDEC MFR id and other info
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
@@ -356,8 +549,6 @@ struct flash_info;
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
* @reg_proto the SPI protocol for read_reg/write_reg/erase operations
- * @cmd_buf: used by the write_reg
- * @erase_map: the erase map of the SPI NOR
* @prepare: [OPTIONAL] do some preparations for the
* read/write/erase/lock/unlock operations
* @unprepare: [OPTIONAL] do some post work after the
@@ -369,19 +560,21 @@ struct flash_info;
* @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
* at the offset @offs; if not provided by the driver,
* spi-nor will send the erase opcode via write_reg()
- * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
- * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
- * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
- * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode
* @clear_sr_bp: [FLASH-SPECIFIC] clears the Block Protection Bits from
* the SPI NOR Status Register.
- * completely locked
+ * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings.
+ * The structure includes legacy flash parameters and
+ * settings that can be overwritten by the spi_nor_fixups
+ * hooks, or dynamically when parsing the SFDP tables.
* @priv: the private data
*/
struct spi_nor {
struct mtd_info mtd;
struct mutex lock;
struct device *dev;
+ struct spi_mem *spimem;
+ u8 *bouncebuf;
+ size_t bouncebuf_size;
const struct flash_info *info;
u32 page_size;
u8 addr_width;
@@ -394,8 +587,6 @@ struct spi_nor {
enum spi_nor_protocol reg_proto;
bool sst_write_second;
u32 flags;
- u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
- struct spi_nor_erase_map erase_map;
int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
@@ -408,11 +599,8 @@ struct spi_nor {
size_t len, const u_char *write_buf);
int (*erase)(struct spi_nor *nor, loff_t offs);
- int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*quad_enable)(struct spi_nor *nor);
int (*clear_sr_bp)(struct spi_nor *nor);
+ struct spi_nor_flash_parameter params;
void *priv;
};
@@ -443,7 +631,7 @@ spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
- return !!nor->erase_map.uniform_erase_type;
+ return !!nor->params.erase_map.uniform_erase_type;
}
static inline void spi_nor_set_flash_node(struct spi_nor *nor,
@@ -458,67 +646,6 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
}
/**
- * struct spi_nor_hwcaps - Structure for describing the hardware capabilies
- * supported by the SPI controller (bus master).
- * @mask: the bitmask listing all the supported hw capabilies
- */
-struct spi_nor_hwcaps {
- u32 mask;
-};
-
-/*
- *(Fast) Read capabilities.
- * MUST be ordered by priority: the higher bit position, the higher priority.
- * As a matter of performances, it is relevant to use Octal SPI protocols first,
- * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
- * (Slow) Read.
- */
-#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0)
-#define SNOR_HWCAPS_READ BIT(0)
-#define SNOR_HWCAPS_READ_FAST BIT(1)
-#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2)
-
-#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3)
-#define SNOR_HWCAPS_READ_1_1_2 BIT(3)
-#define SNOR_HWCAPS_READ_1_2_2 BIT(4)
-#define SNOR_HWCAPS_READ_2_2_2 BIT(5)
-#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6)
-
-#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7)
-#define SNOR_HWCAPS_READ_1_1_4 BIT(7)
-#define SNOR_HWCAPS_READ_1_4_4 BIT(8)
-#define SNOR_HWCAPS_READ_4_4_4 BIT(9)
-#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10)
-
-#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11)
-#define SNOR_HWCAPS_READ_1_1_8 BIT(11)
-#define SNOR_HWCAPS_READ_1_8_8 BIT(12)
-#define SNOR_HWCAPS_READ_8_8_8 BIT(13)
-#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14)
-
-/*
- * Page Program capabilities.
- * MUST be ordered by priority: the higher bit position, the higher priority.
- * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the
- * legacy SPI 1-1-1 protocol.
- * Note that Dual Page Programs are not supported because there is no existing
- * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
- * implements such commands.
- */
-#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16)
-#define SNOR_HWCAPS_PP BIT(16)
-
-#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
-#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
-#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
-#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
-
-#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20)
-#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
-#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
-#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
-
-/**
* spi_nor_scan() - scan the SPI NOR
* @nor: the spi_nor structure
* @name: the chip type name
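Illustrative only: the capability mask a controller driver might now build from the SNOR_HWCAPS_* flags and hand to spi_nor_scan(); the exact capability set is hardware-specific and the example_ name is hypothetical.

static const struct spi_nor_hwcaps example_hwcaps = {
	.mask = SNOR_HWCAPS_READ |
		SNOR_HWCAPS_READ_FAST |
		SNOR_HWCAPS_READ_1_1_4 |	/* quad read on the data lines */
		SNOR_HWCAPS_PP,			/* single-line page program */
};

/* Typically passed as: spi_nor_scan(nor, NULL, &example_hwcaps); */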
diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h
index 42db3f8e8136..3608a6c36fac 100644
--- a/include/linux/mtd/super.h
+++ b/include/linux/mtd/super.h
@@ -17,9 +17,6 @@
extern int get_tree_mtd(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
-extern struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int));
extern void kill_mtd_super(struct super_block *sb);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 0a11712a80e3..570a60c2f4f4 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -490,6 +490,9 @@ extern const struct file_operations nfs_dir_operations;
extern const struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
+extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
+ struct nfs_fh *fh, struct nfs_fattr *fattr,
+ struct nfs4_label *label);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
struct nfs_fattr *fattr, struct nfs4_label *label);
extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 98d904961b33..10f81629b9ce 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -6,6 +6,8 @@
#ifndef _NVME_FC_DRIVER_H
#define _NVME_FC_DRIVER_H 1
+#include <linux/scatterlist.h>
+
/*
* ********************** LLDD FC-NVME Host API ********************
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 09592951725c..682fd465df06 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -18,6 +18,7 @@ struct page_ext_operations {
enum page_ext_flags {
PAGE_EXT_OWNER,
+ PAGE_EXT_OWNER_ACTIVE,
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
PAGE_EXT_IDLE,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c7552459a15f..37a4d9e32cd3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -333,6 +333,16 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
+static inline struct page *find_subpage(struct page *page, pgoff_t offset)
+{
+ if (PageHuge(page))
+ return page;
+
+ VM_BUG_ON_PAGE(PageTail(page), page);
+
+ return page + (offset & (compound_nr(page) - 1));
+}
+
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
new file mode 100644
index 000000000000..bddd9759bab9
--- /dev/null
+++ b/include/linux/pagewalk.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PAGEWALK_H
+#define _LINUX_PAGEWALK_H
+
+#include <linux/mm.h>
+
+struct mm_walk;
+
+/**
+ * mm_walk_ops - callbacks for walk_page_range
+ * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
+ * this handler should only handle pud_trans_huge() puds.
+ * the pmd_entry or pte_entry callbacks will be used for
+ * regular PUDs.
+ * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
+ * this handler is required to be able to handle
+ * pmd_trans_huge() pmds. They may simply choose to
+ * split_huge_page() instead of handling it explicitly.
+ * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
+ * @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
+ * @test_walk: caller specific callback function to determine whether
+ * we walk over the current vma or not. Returning 0 means
+ * "do page table walk over the current vma", returning
+ * a negative value means "abort current page table walk
+ * right now" and returning 1 means "skip the current vma"
+ */
+struct mm_walk_ops {
+ int (*pud_entry)(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pte_entry)(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pte_hole)(unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
+ int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
+ unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
+ int (*test_walk)(unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
+};
+
+/**
+ * mm_walk - walk_page_range data
+ * @ops: operation to call during the walk
+ * @mm: mm_struct representing the target process of page table walk
+ * @vma: vma currently walked (NULL if walking outside vmas)
+ * @private: private data for callbacks' usage
+ *
+ * (see the comment on walk_page_range() for more details)
+ */
+struct mm_walk {
+ const struct mm_walk_ops *ops;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ void *private;
+};
+
+int walk_page_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ void *private);
+int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
+ void *private);
+
+#endif /* _LINUX_PAGEWALK_H */
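A minimal sketch of the relocated API (not part of the patch): a walker that counts populated PTEs in a range. It assumes the caller holds mmap_sem for the target mm; the example_* names are hypothetical.

static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (!pte_none(*pte))
		(*count)++;
	return 0;		/* keep walking */
}

static const struct mm_walk_ops example_walk_ops = {
	.pte_entry = example_pte_entry,
};

static unsigned long example_count_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	/* ops plus private data replace the old all-in-one struct mm_walk. */
	walk_page_range(mm, start, end, &example_walk_ops, &count);
	return count;
}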
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
deleted file mode 100644
index 67064145d76e..000000000000
--- a/include/linux/pci-aspm.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * aspm.h
- *
- * PCI Express ASPM defines and function prototypes
- *
- * Copyright (C) 2007 Intel Corp.
- * Zhang Yanmin (yanmin.zhang@intel.com)
- * Shaohua Li (shaohua.li@intel.com)
- *
- * For more information, please consult the following manuals (look at
- * http://www.pcisig.com/ for how to get them):
- *
- * PCI Express Specification
- */
-
-#ifndef LINUX_ASPM_H
-#define LINUX_ASPM_H
-
-#include <linux/pci.h>
-
-#define PCIE_LINK_STATE_L0S 1
-#define PCIE_LINK_STATE_L1 2
-#define PCIE_LINK_STATE_CLKPM 4
-
-#ifdef CONFIG_PCIEASPM
-int pci_disable_link_state(struct pci_dev *pdev, int state);
-int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
-void pcie_no_aspm(void);
-#else
-static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
-{ return 0; }
-static inline void pcie_no_aspm(void) { }
-#endif
-
-#endif /* LINUX_ASPM_H */
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index bca9bc3e5be7..8318a97c9c61 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -30,8 +30,10 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
unsigned int *nents, u32 length);
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
-int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
+int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
+void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
bool *use_p2pdma);
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
@@ -81,11 +83,17 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
}
-static inline int pci_p2pdma_map_sg(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir)
+static inline int pci_p2pdma_map_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
{
return 0;
}
+static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+}
static inline int pci_p2pdma_enable_store(const char *page,
struct pci_dev **p2p_dev, bool *use_p2pdma)
{
@@ -111,4 +119,16 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client)
return pci_p2pmem_find_many(&client, 1);
}
+static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0);
+}
+
+static inline void pci_p2pdma_unmap_sg(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir)
+{
+ pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0);
+}
+
#endif /* _LINUX_PCI_P2P_H */
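Sketch of a caller using the new *_attrs variants (illustration only); DMA_BIDIRECTIONAL and DMA_ATTR_NO_WARN come from the generic DMA headers, and the surrounding driver logic is elided.

static int example_p2p_dma(struct device *dev, struct scatterlist *sg, int nents)
{
	int mapped = pci_p2pdma_map_sg_attrs(dev, sg, nents,
					     DMA_BIDIRECTIONAL,
					     DMA_ATTR_NO_WARN);
	if (!mapped)
		return -EIO;	/* stubbed out or mapping failed */

	/* ... issue the transfer ... */

	pci_p2pdma_unmap_sg_attrs(dev, sg, nents, DMA_BIDIRECTIONAL,
				  DMA_ATTR_NO_WARN);
	return 0;
}

/* The pci_p2pdma_map_sg()/pci_p2pdma_unmap_sg() wrappers defined above keep
 * existing callers working by passing attrs == 0. */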
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 82e4cd1b7ac3..f9088c89a534 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -6,12 +6,18 @@
* Copyright 1994, Drew Eckhardt
* Copyright 1997--1999 Martin Mares <mj@ucw.cz>
*
+ * PCI Express ASPM defines and function prototypes
+ * Copyright (c) 2007 Intel Corp.
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ * Shaohua Li (shaohua.li@intel.com)
+ *
* For more information, please consult the following manuals (look at
* http://www.pcisig.com/ for how to get them):
*
* PCI BIOS Specification
* PCI Local Bus Specification
* PCI to PCI Bridge Specification
+ * PCI Express Specification
* PCI System Design Guide
*/
#ifndef LINUX_PCI_H
@@ -145,11 +151,6 @@ static inline const char *pci_power_name(pci_power_t state)
return pci_power_names[1 + (__force int) state];
}
-#define PCI_PM_D2_DELAY 200
-#define PCI_PM_D3_WAIT 10
-#define PCI_PM_D3COLD_WAIT 100
-#define PCI_PM_BUS_WAIT 50
-
/**
* typedef pci_channel_state_t
*
@@ -418,7 +419,6 @@ struct pci_dev {
unsigned int broken_intx_masking:1; /* INTx masking can't be used */
unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
unsigned int irq_managed:1;
- unsigned int has_secondary_link:1;
unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
unsigned int is_probed:1; /* Device probing in progress */
unsigned int link_active_reporting:1;/* Device capable of reporting link active */
@@ -649,9 +649,6 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
return dev->bus->self;
}
-struct device *pci_get_host_bridge_device(struct pci_dev *dev);
-void pci_put_host_bridge_device(struct device *dev);
-
#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
@@ -925,6 +922,11 @@ enum {
PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
};
+#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
+#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
+#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
+
/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
@@ -969,7 +971,7 @@ resource_size_t pcibios_align_resource(void *, const struct resource *,
resource_size_t,
resource_size_t);
-/* Weak but can be overriden by arch */
+/* Weak but can be overridden by arch */
void pci_fixup_cardbus(struct pci_bus *);
/* Generic PCI functions used internally */
@@ -995,7 +997,6 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
int busnr);
-void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name,
struct hotplug_slot *hotplug);
@@ -1241,19 +1242,12 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable);
int pci_prepare_to_sleep(struct pci_dev *dev);
int pci_back_from_sleep(struct pci_dev *dev);
bool pci_dev_run_wake(struct pci_dev *dev);
-bool pci_check_pme_status(struct pci_dev *dev);
-void pci_pme_wakeup_bus(struct pci_bus *bus);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
void pci_wakeup_bus(struct pci_bus *bus);
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
-/* PCI Virtual Channel */
-int pci_save_vc_state(struct pci_dev *dev);
-void pci_restore_vc_state(struct pci_dev *dev);
-void pci_allocate_vc_save_buffers(struct pci_dev *dev);
-
/* For use by arch with custom probe code */
void set_pcie_port_type(struct pci_dev *pdev);
void set_pcie_hotplug_bridge(struct pci_dev *pdev);
@@ -1297,8 +1291,6 @@ int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
void pci_release_selected_regions(struct pci_dev *, int);
/* drivers/pci/bus.c */
-struct pci_bus *pci_bus_get(struct pci_bus *bus);
-void pci_bus_put(struct pci_bus *bus);
void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
resource_size_t offset);
@@ -1408,11 +1400,6 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
-#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
-#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
-#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
-#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
-
/*
* Virtual interrupts allow for more interrupts to be allocated
* than the device has interrupts for. These are not programmed
@@ -1517,14 +1504,6 @@ static inline int pci_irq_get_node(struct pci_dev *pdev, int vec)
}
#endif
-static inline int
-pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
- unsigned int max_vecs, unsigned int flags)
-{
- return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
- NULL);
-}
-
/**
* pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
* @d: the INTx IRQ domain
@@ -1565,10 +1544,22 @@ extern bool pcie_ports_native;
#define pcie_ports_native false
#endif
+#define PCIE_LINK_STATE_L0S 1
+#define PCIE_LINK_STATE_L1 2
+#define PCIE_LINK_STATE_CLKPM 4
+
#ifdef CONFIG_PCIEASPM
+int pci_disable_link_state(struct pci_dev *pdev, int state);
+int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
+void pcie_no_aspm(void);
bool pcie_aspm_support_enabled(void);
bool pcie_aspm_enabled(struct pci_dev *pdev);
#else
+static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
+{ return 0; }
+static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
+{ return 0; }
+static inline void pcie_no_aspm(void) { }
static inline bool pcie_aspm_support_enabled(void) { return false; }
static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
#endif
@@ -1579,23 +1570,8 @@ bool pci_aer_available(void);
static inline bool pci_aer_available(void) { return false; }
#endif
-#ifdef CONFIG_PCIE_ECRC
-void pcie_set_ecrc_checking(struct pci_dev *dev);
-void pcie_ecrc_get_policy(char *str);
-#else
-static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
-static inline void pcie_ecrc_get_policy(char *str) { }
-#endif
-
bool pci_ats_disabled(void);
-#ifdef CONFIG_PCIE_PTM
-int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
-#else
-static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
-{ return -EINVAL; }
-#endif
-
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -1749,11 +1725,6 @@ static inline void pci_release_regions(struct pci_dev *dev) { }
static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
-static inline void pci_block_cfg_access(struct pci_dev *dev) { }
-static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
-{ return 0; }
-static inline void pci_unblock_cfg_access(struct pci_dev *dev) { }
-
static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
{ return NULL; }
static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
@@ -1782,17 +1753,36 @@ static inline const struct pci_device_id *pci_match_id(const struct pci_device_i
struct pci_dev *dev)
{ return NULL; }
static inline bool pci_ats_disabled(void) { return true; }
+
+static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+ return -EINVAL;
+}
+
+static inline int
+pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ struct irq_affinity *aff_desc)
+{
+ return -ENOSPC;
+}
#endif /* CONFIG_PCI */
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
+ NULL);
+}
+
#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
-void pci_ats_init(struct pci_dev *dev);
int pci_enable_ats(struct pci_dev *dev, int ps);
void pci_disable_ats(struct pci_dev *dev);
int pci_ats_queue_depth(struct pci_dev *dev);
int pci_ats_page_aligned(struct pci_dev *dev);
#else
-static inline void pci_ats_init(struct pci_dev *d) { }
static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
static inline void pci_disable_ats(struct pci_dev *d) { }
static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
@@ -1803,7 +1793,7 @@ static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
#include <asm/pci.h>
-/* These two functions provide almost identical functionality. Depennding
+/* These two functions provide almost identical functionality. Depending
* on the architecture, one will be implemented as a wrapper around the
* other (in drivers/pci/mmap.c).
*
@@ -1872,25 +1862,9 @@ static inline const char *pci_name(const struct pci_dev *pdev)
return dev_name(&pdev->dev);
}
-
-/*
- * Some archs don't want to expose struct resource to userland as-is
- * in sysfs and /proc
- */
-#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end);
-#else
-static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc, resource_size_t *start,
- resource_size_t *end)
-{
- *start = rsrc->start;
- *end = rsrc->end;
-}
-#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
-
/*
* The world is not perfect and supplies us with broken PCI devices.
@@ -2032,10 +2006,6 @@ extern unsigned long pci_cardbus_mem_size;
extern u8 pci_dfl_cache_line_size;
extern u8 pci_cache_line_size;
-extern unsigned long pci_hotplug_io_size;
-extern unsigned long pci_hotplug_mem_size;
-extern unsigned long pci_hotplug_bus_size;
-
/* Architecture-specific versions may override these (weak) */
void pcibios_disable_device(struct pci_dev *dev);
void pcibios_set_master(struct pci_dev *dev);
@@ -2305,10 +2275,6 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
#ifdef CONFIG_OF
struct device_node;
struct irq_domain;
-void pci_set_of_node(struct pci_dev *dev);
-void pci_release_of_node(struct pci_dev *dev);
-void pci_set_bus_of_node(struct pci_bus *bus);
-void pci_release_bus_of_node(struct pci_bus *bus);
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
@@ -2318,10 +2284,6 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
-static inline void pci_set_of_node(struct pci_dev *dev) { }
-static inline void pci_release_of_node(struct pci_dev *dev) { }
-static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
-static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline int pci_parse_request_of_pci_ranges(struct device *dev,
@@ -2435,4 +2397,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#define pci_notice_ratelimited(pdev, fmt, arg...) \
dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
+#define pci_info_ratelimited(pdev, fmt, arg...) \
+ dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
+
#endif /* LINUX_PCI_H */
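Worth a short sketch (not part of the patch): with the PCI_IRQ_* flags and pci_alloc_irq_vectors() now visible outside CONFIG_PCI, a driver can allocate vectors unconditionally and only handle the error path. The example_* name and the 1..4 vector range are assumptions.

static int example_setup_irqs(struct pci_dev *pdev)
{
	int nvec = pci_alloc_irq_vectors(pdev, 1, 4,
					 PCI_IRQ_MSIX | PCI_IRQ_MSI |
					 PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;	/* e.g. -ENOSPC from the !CONFIG_PCI stub */

	pci_info_ratelimited(pdev, "using %d interrupt vector(s)\n", nvec);

	return pci_irq_vector(pdev, 0);	/* Linux IRQ number of vector 0 */
}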
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index f694eb2ca978..b482e42d7153 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -86,114 +86,14 @@ void pci_hp_deregister(struct hotplug_slot *slot);
#define pci_hp_initialize(slot, bus, nr, name) \
__pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME)
-/* PCI Setting Record (Type 0) */
-struct hpp_type0 {
- u32 revision;
- u8 cache_line_size;
- u8 latency_timer;
- u8 enable_serr;
- u8 enable_perr;
-};
-
-/* PCI-X Setting Record (Type 1) */
-struct hpp_type1 {
- u32 revision;
- u8 max_mem_read;
- u8 avg_max_split;
- u16 tot_max_split;
-};
-
-/* PCI Express Setting Record (Type 2) */
-struct hpp_type2 {
- u32 revision;
- u32 unc_err_mask_and;
- u32 unc_err_mask_or;
- u32 unc_err_sever_and;
- u32 unc_err_sever_or;
- u32 cor_err_mask_and;
- u32 cor_err_mask_or;
- u32 adv_err_cap_and;
- u32 adv_err_cap_or;
- u16 pci_exp_devctl_and;
- u16 pci_exp_devctl_or;
- u16 pci_exp_lnkctl_and;
- u16 pci_exp_lnkctl_or;
- u32 sec_unc_err_sever_and;
- u32 sec_unc_err_sever_or;
- u32 sec_unc_err_mask_and;
- u32 sec_unc_err_mask_or;
-};
-
-/*
- * _HPX PCI Express Setting Record (Type 3)
- */
-struct hpx_type3 {
- u16 device_type;
- u16 function_type;
- u16 config_space_location;
- u16 pci_exp_cap_id;
- u16 pci_exp_cap_ver;
- u16 pci_exp_vendor_id;
- u16 dvsec_id;
- u16 dvsec_rev;
- u16 match_offset;
- u32 match_mask_and;
- u32 match_value;
- u16 reg_offset;
- u32 reg_mask_and;
- u32 reg_mask_or;
-};
-
-struct hotplug_program_ops {
- void (*program_type0)(struct pci_dev *dev, struct hpp_type0 *hpp);
- void (*program_type1)(struct pci_dev *dev, struct hpp_type1 *hpp);
- void (*program_type2)(struct pci_dev *dev, struct hpp_type2 *hpp);
- void (*program_type3)(struct pci_dev *dev, struct hpx_type3 *hpp);
-};
-
-enum hpx_type3_dev_type {
- HPX_TYPE_ENDPOINT = BIT(0),
- HPX_TYPE_LEG_END = BIT(1),
- HPX_TYPE_RC_END = BIT(2),
- HPX_TYPE_RC_EC = BIT(3),
- HPX_TYPE_ROOT_PORT = BIT(4),
- HPX_TYPE_UPSTREAM = BIT(5),
- HPX_TYPE_DOWNSTREAM = BIT(6),
- HPX_TYPE_PCI_BRIDGE = BIT(7),
- HPX_TYPE_PCIE_BRIDGE = BIT(8),
-};
-
-enum hpx_type3_fn_type {
- HPX_FN_NORMAL = BIT(0),
- HPX_FN_SRIOV_PHYS = BIT(1),
- HPX_FN_SRIOV_VIRT = BIT(2),
-};
-
-enum hpx_type3_cfg_loc {
- HPX_CFG_PCICFG = 0,
- HPX_CFG_PCIE_CAP = 1,
- HPX_CFG_PCIE_CAP_EXT = 2,
- HPX_CFG_VEND_CAP = 3,
- HPX_CFG_DVSEC = 4,
- HPX_CFG_MAX,
-};
-
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
-int pci_acpi_program_hp_params(struct pci_dev *dev,
- const struct hotplug_program_ops *hp_ops);
bool pciehp_is_native(struct pci_dev *bridge);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
bool shpchp_is_native(struct pci_dev *bridge);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(acpi_handle handle);
#else
-static inline int pci_acpi_program_hp_params(struct pci_dev *dev,
- const struct hotplug_program_ops *hp_ops)
-{
- return -ENODEV;
-}
-
static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge)
{
return 0;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index de1b75e963ef..21a572469a4e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2134,6 +2134,7 @@
#define PCI_VENDOR_ID_MYRICOM 0x14c1
#define PCI_VENDOR_ID_MEDIATEK 0x14c3
+#define PCI_DEVICE_ID_MEDIATEK_7629 0x7629
#define PCI_VENDOR_ID_TITAN 0x14D2
#define PCI_DEVICE_ID_TITAN_010L 0x8001
@@ -2574,6 +2575,8 @@
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
+#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
+
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001
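A hedged sketch of how the new vendor/device IDs might be used in a driver match table; the table itself is hypothetical.

/* Sketch: match entries using the IDs added above. */
static const struct pci_device_id example_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, PCI_DEVICE_ID_MEDIATEK_7629) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, PCI_ANY_ID) },
        { }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);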
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 86720a5a384f..7f8c7d9583d3 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -24,6 +24,7 @@ struct device;
#ifdef CONFIG_PINCTRL
/* External interface to pin control */
+extern bool pinctrl_gpio_can_use_line(unsigned gpio);
extern int pinctrl_gpio_request(unsigned gpio);
extern void pinctrl_gpio_free(unsigned gpio);
extern int pinctrl_gpio_direction_input(unsigned gpio);
@@ -61,6 +62,11 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
#else /* !CONFIG_PINCTRL */
+static inline bool pinctrl_gpio_can_use_line(unsigned gpio)
+{
+ return true;
+}
+
static inline int pinctrl_gpio_request(unsigned gpio)
{
return 0;
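A hedged sketch of a caller of the new pinctrl_gpio_can_use_line() helper; the wrapper function is hypothetical. Note the !CONFIG_PINCTRL stub above deliberately returns true as a permissive default.

/* Sketch: report whether a (global) GPIO number is still usable as GPIO. */
static bool example_gpio_line_is_available(unsigned int gpio)
{
        /* True when pinctrl has not muxed the pin away from GPIO use. */
        return pinctrl_gpio_can_use_line(gpio);
}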
diff --git a/include/linux/platform_data/cros_ec_chardev.h b/include/linux/platform_data/cros_ec_chardev.h
new file mode 100644
index 000000000000..7de8faaf77df
--- /dev/null
+++ b/include/linux/platform_data/cros_ec_chardev.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ChromeOS EC device interface.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ */
+
+#ifndef _UAPI_LINUX_CROS_EC_DEV_H_
+#define _UAPI_LINUX_CROS_EC_DEV_H_
+
+#include <linux/bits.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <linux/platform_data/cros_ec_commands.h>
+
+#define CROS_EC_DEV_VERSION "1.0.0"
+
+/**
+ * struct cros_ec_readmem - Struct used to read mapped memory.
+ * @offset: Within EC_LPC_ADDR_MEMMAP region.
+ * @bytes: Number of bytes to read. Zero means "read a string" (including '\0').
+ * At most EC_MEMMAP_SIZE bytes can be read.
+ * @buffer: Where to store the result. The ioctl returns the number of bytes
+ * read or negative on error.
+ */
+struct cros_ec_readmem {
+ uint32_t offset;
+ uint32_t bytes;
+ uint8_t buffer[EC_MEMMAP_SIZE];
+};
+
+#define CROS_EC_DEV_IOC 0xEC
+#define CROS_EC_DEV_IOCXCMD _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command)
+#define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem)
+#define CROS_EC_DEV_IOCEVENTMASK _IO(CROS_EC_DEV_IOC, 2)
+
+#endif /* _UAPI_LINUX_CROS_EC_DEV_H_ */
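A hedged userspace-style sketch of the ioctl interface declared above. Since this header is not an exported uapi header, a real program would carry its own copy of these definitions; the device path, offset and error handling here are illustrative.

/* Sketch: read the first bytes of the EC memory-mapped region. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int example_read_memmap(void)
{
        struct cros_ec_readmem rd = { .offset = 0, .bytes = 4 };
        int fd = open("/dev/cros_ec", O_RDWR);
        int ret;

        if (fd < 0)
                return -1;
        /* CROS_EC_DEV_IOCRDMEM returns the number of bytes read, or < 0. */
        ret = ioctl(fd, CROS_EC_DEV_IOCRDMEM, &rd);
        if (ret > 0)
                printf("memmap byte 0: 0x%02x (%d bytes read)\n", rd.buffer[0], ret);
        close(fd);
        return ret < 0 ? -1 : 0;
}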
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 7ccb8757b79d..98415686cbfa 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -5513,6 +5513,18 @@ struct ec_params_fp_seed {
uint8_t seed[FP_CONTEXT_TPM_BYTES];
} __ec_align4;
+#define EC_CMD_FP_ENC_STATUS 0x0409
+
+/* FP TPM seed has been set or not */
+#define FP_ENC_STATUS_SEED_SET BIT(0)
+
+struct ec_response_fp_encryption_status {
+ /* Used bits in encryption engine status */
+ uint32_t valid_flags;
+ /* Encryption engine status */
+ uint32_t status;
+} __ec_align4;
+
/*****************************************************************************/
/* Touchpad MCU commands: range 0x0500-0x05FF */
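A hedged in-kernel sketch querying the new EC_CMD_FP_ENC_STATUS command through cros_ec_cmd_xfer_status() (declared in the new cros_ec_proto.h below); the helper name and buffer layout are illustrative.

/* Sketch: ask the fingerprint MCU whether the TPM seed has been set. */
static int example_fp_seed_is_set(struct cros_ec_device *ec_dev, bool *set)
{
        struct {
                struct cros_ec_command msg;
                struct ec_response_fp_encryption_status resp;
        } __packed buf = {};
        int ret;

        buf.msg.command = EC_CMD_FP_ENC_STATUS;
        buf.msg.insize = sizeof(buf.resp);

        ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
        if (ret < 0)
                return ret;

        *set = (buf.resp.valid_flags & FP_ENC_STATUS_SEED_SET) &&
               (buf.resp.status & FP_ENC_STATUS_SEED_SET);
        return 0;
}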
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
new file mode 100644
index 000000000000..eab7036cda09
--- /dev/null
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ChromeOS Embedded Controller protocol interface.
+ *
+ * Copyright (C) 2012 Google, Inc
+ */
+
+#ifndef __LINUX_CROS_EC_PROTO_H
+#define __LINUX_CROS_EC_PROTO_H
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+
+#include <linux/platform_data/cros_ec_commands.h>
+
+#define CROS_EC_DEV_NAME "cros_ec"
+#define CROS_EC_DEV_FP_NAME "cros_fp"
+#define CROS_EC_DEV_ISH_NAME "cros_ish"
+#define CROS_EC_DEV_PD_NAME "cros_pd"
+#define CROS_EC_DEV_SCP_NAME "cros_scp"
+#define CROS_EC_DEV_TP_NAME "cros_tp"
+
+/*
+ * The EC is unresponsive for a time after a reboot command. Add a
+ * simple delay to make sure that the bus stays locked.
+ */
+#define EC_REBOOT_DELAY_MS 50
+
+/*
+ * Max bus-specific overhead incurred by request/responses.
+ * I2C requires 1 additional byte for requests.
+ * I2C requires 2 additional bytes for responses.
+ * SPI requires up to 32 additional bytes for responses.
+ */
+#define EC_PROTO_VERSION_UNKNOWN 0
+#define EC_MAX_REQUEST_OVERHEAD 1
+#define EC_MAX_RESPONSE_OVERHEAD 32
+
+/*
+ * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
+ */
+enum {
+ EC_MSG_TX_HEADER_BYTES = 3,
+ EC_MSG_TX_TRAILER_BYTES = 1,
+ EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES +
+ EC_MSG_TX_TRAILER_BYTES,
+ EC_MSG_RX_PROTO_BYTES = 3,
+
+ /* Max length of messages for proto 2 */
+ EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
+ EC_MSG_TX_PROTO_BYTES,
+
+ EC_MAX_MSG_BYTES = 64 * 1024,
+};
+
+/**
+ * struct cros_ec_command - Information about a ChromeOS EC command.
+ * @version: Command version number (often 0).
+ * @command: Command to send (EC_CMD_...).
+ * @outsize: Outgoing length in bytes.
+ * @insize: Max number of bytes to accept from the EC.
+ * @result: EC's response to the command (separate from communication failure).
+ * @data: Where to put the incoming data from EC and outgoing data to EC.
+ */
+struct cros_ec_command {
+ uint32_t version;
+ uint32_t command;
+ uint32_t outsize;
+ uint32_t insize;
+ uint32_t result;
+ uint8_t data[0];
+};
+
+/**
+ * struct cros_ec_device - Information about a ChromeOS EC device.
+ * @phys_name: Name of physical comms layer (e.g. 'i2c-4').
+ * @dev: Device pointer for physical comms device
+ * @was_wake_device: True if this device was set to wake the system from
+ * sleep at the last suspend.
+ * @cros_class: The class structure for this device.
+ * @cmd_readmem: Direct read of the EC memory-mapped region, if supported.
+ * @offset: Offset within the EC_LPC_ADDR_MEMMAP region.
+ * @bytes: Number of bytes to read. Zero means "read a string" (including
+ * the trailing '\0'). At most EC_MEMMAP_SIZE bytes can be
+ * read. Caller must ensure that the buffer is large enough for the
+ * result when reading a string.
+ * @max_request: Max size of message requested.
+ * @max_response: Max size of message response.
+ * @max_passthru: Max size of passthru message.
+ * @proto_version: The protocol version used for this device.
+ * @priv: Private data.
+ * @irq: Interrupt to use.
+ * @id: Device id.
+ * @din: Input buffer (for data from EC). This buffer will always be
+ * dword-aligned and include enough space for up to 7 word-alignment
+ * bytes also, so we can ensure that the body of the message is always
+ * dword-aligned (64-bit). We use this alignment to keep ARM and x86
+ * happy. Word alignment would probably be OK, but there might be a small
+ * performance advantage to using dword.
+ * @dout: Output buffer (for data to EC). This buffer will always be
+ * dword-aligned and include enough space for up to 7 word-alignment
+ * bytes also, so we can ensure that the body of the message is always
+ * dword-aligned (64-bit). We use this alignment to keep ARM and x86
+ * happy. Word alignment would probably be OK, but there might be a small
+ * performance advantage to using dword.
+ * @din_size: Size of din buffer to allocate (zero to use static din).
+ * @dout_size: Size of dout buffer to allocate (zero to use static dout).
+ * @wake_enabled: True if this device can wake the system from sleep.
+ * @suspended: True if this device had been suspended.
+ * @cmd_xfer: Send command to EC and get response.
+ * Returns the number of bytes received if the communication
+ * succeeded, but that doesn't mean the EC was happy with the
+ * command. The caller should check msg.result for the EC's result
+ * code.
+ * @pkt_xfer: Send packet to EC and get response.
+ * @lock: One transaction at a time.
+ * @mkbp_event_supported: True if this EC supports the MKBP event protocol.
+ * @host_sleep_v1: True if this EC supports the sleep v1 command.
+ * @event_notifier: Interrupt event notifier for transport devices.
+ * @event_data: Raw payload transferred with the MKBP event.
+ * @event_size: Size in bytes of the event data.
+ * @host_event_wake_mask: Mask of host events that cause wake from suspend.
+ * @ec: The platform_device used by the mfd driver to interface with the
+ * main EC.
+ * @pd: The platform_device used by the mfd driver to interface with the
+ * PD behind an EC.
+ */
+struct cros_ec_device {
+ /* These are used by other drivers that want to talk to the EC */
+ const char *phys_name;
+ struct device *dev;
+ bool was_wake_device;
+ struct class *cros_class;
+ int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
+ unsigned int bytes, void *dest);
+
+ /* These are used to implement the platform-specific interface */
+ u16 max_request;
+ u16 max_response;
+ u16 max_passthru;
+ u16 proto_version;
+ void *priv;
+ int irq;
+ u8 *din;
+ u8 *dout;
+ int din_size;
+ int dout_size;
+ bool wake_enabled;
+ bool suspended;
+ int (*cmd_xfer)(struct cros_ec_device *ec,
+ struct cros_ec_command *msg);
+ int (*pkt_xfer)(struct cros_ec_device *ec,
+ struct cros_ec_command *msg);
+ struct mutex lock;
+ bool mkbp_event_supported;
+ bool host_sleep_v1;
+ struct blocking_notifier_head event_notifier;
+
+ struct ec_response_get_next_event_v1 event_data;
+ int event_size;
+ u32 host_event_wake_mask;
+ u32 last_resume_result;
+
+ /* The platform devices used by the mfd driver */
+ struct platform_device *ec;
+ struct platform_device *pd;
+};
+
+/**
+ * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
+ * @sensor_num: Id of the sensor, as reported by the EC.
+ */
+struct cros_ec_sensor_platform {
+ u8 sensor_num;
+};
+
+/**
+ * struct cros_ec_platform - ChromeOS EC platform information.
+ * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
+ * used in /dev/ and sysfs.
+ * @cmd_offset: Offset to apply for each command. Set when
+ * registering a device behind another one.
+ */
+struct cros_ec_platform {
+ const char *ec_name;
+ u16 cmd_offset;
+};
+
+/**
+ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
+ * @ec_dev: Device to suspend.
+ *
+ * This can be called by drivers to handle a suspend event.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_suspend(struct cros_ec_device *ec_dev);
+
+/**
+ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
+ * @ec_dev: Device to resume.
+ *
+ * This can be called by drivers to handle a resume event.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_resume(struct cros_ec_device *ec_dev);
+
+/**
+ * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
+ * @ec_dev: Device to prepare the outgoing message for.
+ * @msg: Message to write.
+ *
+ * This is intended to be used by all ChromeOS EC drivers, but at present
+ * only SPI uses it. Once LPC uses the same protocol it can start using it.
+ * I2C could use it now, with a refactor of the existing code.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+/**
+ * cros_ec_check_result() - Check ec_msg->result.
+ * @ec_dev: EC device.
+ * @msg: Message to check.
+ *
+ * This is used by ChromeOS EC drivers to check the ec_msg->result for
+ * errors and to warn about them.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_check_result(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+/**
+ * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used
+ * instead of calling the EC's cmd_xfer() callback directly.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+/**
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * This function is identical to cros_ec_cmd_xfer, except it returns success
+ * status only if both the command was transmitted successfully and the EC
+ * replied with success status. It's not necessary to check msg->result when
+ * using this function.
+ *
+ * Return: The number of bytes transferred on success or negative error code.
+ */
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+/**
+ * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
+ * @ec_dev: Device to register.
+ *
+ * Before calling this, allocate a pointer to a new device and then fill
+ * in all the fields up to the --private-- marker.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_register(struct cros_ec_device *ec_dev);
+
+/**
+ * cros_ec_unregister() - Remove a ChromeOS EC.
+ * @ec_dev: Device to unregister.
+ *
+ * Call this to deregister a ChromeOS EC, then clean up any private data.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_unregister(struct cros_ec_device *ec_dev);
+
+/**
+ * cros_ec_query_all() - Query the protocol version supported by the
+ * ChromeOS EC.
+ * @ec_dev: Device to query.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_ec_query_all(struct cros_ec_device *ec_dev);
+
+/**
+ * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
+ * @wake_event: Pointer to a bool set to true upon return if the event might be
+ * treated as a wake event. Ignored if null.
+ *
+ * Return: negative error code on errors; 0 for no data; or else number of
+ * bytes received (i.e., an event was retrieved successfully). Event types are
+ * written out to @ec_dev->event_data.event_type on success.
+ */
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event);
+
+/**
+ * cros_ec_get_host_event() - Return a mask of the events set by the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
+ *
+ * When MKBP is supported and the EC raises an interrupt, we collect the
+ * raised events and call the functions registered on the EC notifier. This
+ * function is a helper to find out which events were raised.
+ *
+ * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*.
+ */
+u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
+
+#endif /* __LINUX_CROS_EC_PROTO_H */
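A hedged sketch of a sub-driver consuming MKBP events through the @event_notifier head documented above; the callback and registration helper are hypothetical.

/* Sketch: subscribe to EC events and peek at the decoded payload. */
static int example_ec_event(struct notifier_block *nb,
                            unsigned long queued_during_suspend, void *_ec)
{
        struct cros_ec_device *ec_dev = _ec;

        /* event_data/event_size were filled by cros_ec_get_next_event()
         * in the transport driver before this chain ran. */
        pr_debug("EC event type %d, %d payload bytes\n",
                 ec_dev->event_data.event_type, ec_dev->event_size);
        return NOTIFY_OK;
}

static int example_subscribe(struct cros_ec_device *ec_dev,
                             struct notifier_block *nb)
{
        nb->notifier_call = example_ec_event;
        return blocking_notifier_chain_register(&ec_dev->event_notifier, nb);
}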
diff --git a/include/linux/platform_data/keypad-w90p910.h b/include/linux/platform_data/keypad-w90p910.h
deleted file mode 100644
index 206ca4ecd93f..000000000000
--- a/include/linux/platform_data/keypad-w90p910.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_W90P910_KEYPAD_H
-#define __ASM_ARCH_W90P910_KEYPAD_H
-
-#include <linux/input/matrix_keypad.h>
-
-extern void mfp_set_groupi(struct device *dev);
-
-struct w90p910_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
-
- unsigned int prescale;
- unsigned int debounce;
-};
-
-#endif /* __ASM_ARCH_W90P910_KEYPAD_H */
diff --git a/include/linux/platform_data/pinctrl-single.h b/include/linux/platform_data/pinctrl-single.h
index 1cf36fdf9510..7473d3c4cabf 100644
--- a/include/linux/platform_data/pinctrl-single.h
+++ b/include/linux/platform_data/pinctrl-single.h
@@ -1,4 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _PINCTRL_SINGLE_H
+#define _PINCTRL_SINGLE_H
+
/**
* irq: optional wake-up interrupt
* rearm: optional soc specific rearm function
@@ -11,3 +15,5 @@ struct pcs_pdata {
int irq;
void (*rearm)(void);
};
+
+#endif /* _PINCTRL_SINGLE_H */
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 0c587d4fc718..b5b7a3423ca8 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef __TI_SYSC_DATA_H__
#define __TI_SYSC_DATA_H__
@@ -47,6 +49,7 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_MODULE_QUIRK_SGX BIT(18)
#define SYSC_MODULE_QUIRK_HDQ1W BIT(17)
#define SYSC_MODULE_QUIRK_I2C BIT(16)
#define SYSC_MODULE_QUIRK_WDT BIT(15)
@@ -70,7 +73,7 @@ struct sysc_regbits {
/**
* struct sysc_capabilities - capabilities for an interconnect target module
- *
+ * @type: sysc type identifier for the module
* @sysc_mask: bitmask of supported SYSCONFIG register bits
* @regbits: bitmask of SYSCONFIG register bits
* @mod_quirks: bitmask of module specific quirks
@@ -85,8 +88,9 @@ struct sysc_capabilities {
/**
* struct sysc_config - configuration for an interconnect target module
* @sysc_val: configured value for sysc register
+ * @syss_mask: configured mask value for SYSSTATUS register
* @midlemodes: bitmask of supported master idle modes
- * @sidlemodes: bitmask of supported master idle modes
+ * @sidlemodes: bitmask of supported slave idle modes
* @srst_udelay: optional delay needed after OCP soft reset
* @quirks: bitmask of enabled quirks
*/
diff --git a/include/linux/printk.h b/include/linux/printk.h
index cefd374c47b1..c09d67edda3a 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -488,13 +488,6 @@ extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
- dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
-#else
-extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
- const void *buf, size_t len);
-#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
#else
static inline void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
@@ -526,4 +519,19 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
}
#endif
+/**
+ * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ *
+ * Calls print_hex_dump(), with log level of KERN_DEBUG,
+ * rowsize of 16, groupsize of 1, and ASCII output included.
+ */
+#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
+ print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
+
#endif
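A hedged usage sketch for the relocated print_hex_dump_bytes() helper; buffer and prefix string are illustrative.

/* Sketch: dump a buffer at KERN_DEBUG with offsets, 16 bytes per row. */
static void example_dump(const void *buf, size_t len)
{
        print_hex_dump_bytes("example: ", DUMP_PREFIX_OFFSET, buf, len);
}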
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 24632a7a7d11..b2c9c460947d 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -262,7 +262,7 @@ struct pwm_ops {
int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_capture *result, unsigned long timeout);
int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state);
+ const struct pwm_state *state);
void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state);
struct module *owner;
@@ -316,7 +316,7 @@ struct pwm_capture {
/* PWM user APIs */
struct pwm_device *pwm_request(int pwm_id, const char *label);
void pwm_free(struct pwm_device *pwm);
-int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state);
+int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state);
int pwm_adjust_config(struct pwm_device *pwm);
/**
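A hedged sketch of a caller under the constified API: the pwm_state is built locally and passed by const pointer to pwm_apply_state(); the values are illustrative.

/* Sketch: enable a PWM with a fixed period/duty from a local state copy. */
static int example_pwm_enable(struct pwm_device *pwm)
{
        struct pwm_state state;

        pwm_get_state(pwm, &state);     /* start from the current state */
        state.period = 1000000;         /* 1 ms, in ns */
        state.duty_cycle = 500000;      /* 50% */
        state.enabled = true;
        return pwm_apply_state(pwm, &state);
}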
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 898f595ea3d6..74efca15fde7 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -225,7 +225,7 @@ struct qed_rdma_start_in_params {
struct qed_rdma_add_user_out_params {
u16 dpi;
- u64 dpi_addr;
+ void __iomem *dpi_addr;
u64 dpi_phys_addr;
u32 dpi_size;
u16 wid_count;
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
deleted file mode 100644
index 034982c98c8b..000000000000
--- a/include/linux/quicklist.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_QUICKLIST_H
-#define LINUX_QUICKLIST_H
-/*
- * Fast allocations and disposal of pages. Pages must be in the condition
- * as needed after allocation when they are freed. Per cpu lists of pages
- * are kept that only contain node local pages.
- *
- * (C) 2007, SGI. Christoph Lameter <cl@linux.com>
- */
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/percpu.h>
-
-#ifdef CONFIG_QUICKLIST
-
-struct quicklist {
- void *page;
- int nr_pages;
-};
-
-DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
-
-/*
- * The two key functions quicklist_alloc and quicklist_free are inline so
- * that they may be custom compiled for the platform.
- * Specifying a NULL ctor can remove constructor support. Specifying
- * a constant quicklist allows the determination of the exact address
- * in the per cpu area.
- *
- * The fast patch in quicklist_alloc touched only a per cpu cacheline and
- * the first cacheline of the page itself. There is minmal overhead involved.
- */
-static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
-{
- struct quicklist *q;
- void **p = NULL;
-
- q =&get_cpu_var(quicklist)[nr];
- p = q->page;
- if (likely(p)) {
- q->page = p[0];
- p[0] = NULL;
- q->nr_pages--;
- }
- put_cpu_var(quicklist);
- if (likely(p))
- return p;
-
- p = (void *)__get_free_page(flags | __GFP_ZERO);
- if (ctor && p)
- ctor(p);
- return p;
-}
-
-static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
- struct page *page)
-{
- struct quicklist *q;
-
- q = &get_cpu_var(quicklist)[nr];
- *(void **)p = q->page;
- q->page = p;
- q->nr_pages++;
- put_cpu_var(quicklist);
-}
-
-static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
-{
- __quicklist_free(nr, dtor, pp, virt_to_page(pp));
-}
-
-static inline void quicklist_free_page(int nr, void (*dtor)(void *),
- struct page *page)
-{
- __quicklist_free(nr, dtor, page_address(page), page);
-}
-
-void quicklist_trim(int nr, void (*dtor)(void *),
- unsigned long min_pages, unsigned long max_free);
-
-unsigned long quicklist_total_size(void);
-
-#else
-
-static inline unsigned long quicklist_total_size(void)
-{
- return 0;
-}
-
-#endif
-
-#endif /* LINUX_QUICKLIST_H */
-
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index dc905a4ff8d7..185d94829701 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
/* i_mutex must being held */
static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
{
- return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
+ return (ia->ia_valid & ATTR_SIZE) ||
(ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
(ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
}
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index ee582bdb7fda..b806a0ff6554 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -4,8 +4,7 @@
struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
umode_t mode, dev_t dev);
-extern struct dentry *ramfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data);
+extern int ramfs_init_fs_context(struct fs_context *fc);
#ifdef CONFIG_MMU
static inline int
@@ -17,9 +16,8 @@ ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
#endif
+extern const struct fs_parameter_description ramfs_fs_parameters;
extern const struct file_operations ramfs_file_operations;
extern const struct vm_operations_struct generic_file_vm_ops;
-int ramfs_fill_super(struct super_block *sb, void *data, int silent);
-
#endif
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 179faab29f52..fdd421b8d9ae 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -60,41 +60,87 @@ rb_insert_augmented_cached(struct rb_node *node,
rb_insert_augmented(node, &root->rb_root, augment);
}
-#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
- rbtype, rbaugmented, rbcompute) \
+/*
+ * Template for declaring augmented rbtree callbacks (generic case)
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data
+ */
+
+#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \
static inline void \
-rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
+RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
{ \
while (rb != stop) { \
- rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
- rbtype augmented = rbcompute(node); \
- if (node->rbaugmented == augmented) \
+ RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
+ if (RBCOMPUTE(node, true)) \
break; \
- node->rbaugmented = augmented; \
- rb = rb_parent(&node->rbfield); \
+ rb = rb_parent(&node->RBFIELD); \
} \
} \
static inline void \
-rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
} \
static void \
-rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
- old->rbaugmented = rbcompute(old); \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
+ RBCOMPUTE(old, false); \
} \
-rbstatic const struct rb_augment_callbacks rbname = { \
- .propagate = rbname ## _propagate, \
- .copy = rbname ## _copy, \
- .rotate = rbname ## _rotate \
+RBSTATIC const struct rb_augment_callbacks RBNAME = { \
+ .propagate = RBNAME ## _propagate, \
+ .copy = RBNAME ## _copy, \
+ .rotate = RBNAME ## _rotate \
};
+/*
+ * Template for declaring augmented rbtree callbacks,
+ * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes.
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBTYPE: type of the RBAUGMENTED field
+ * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar
+ */
+
+#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \
+ RBTYPE, RBAUGMENTED, RBCOMPUTE) \
+static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \
+{ \
+ RBSTRUCT *child; \
+ RBTYPE max = RBCOMPUTE(node); \
+ if (node->RBFIELD.rb_left) { \
+ child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (node->RBFIELD.rb_right) { \
+ child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (exit && node->RBAUGMENTED == max) \
+ return true; \
+ node->RBAUGMENTED = max; \
+ return false; \
+} \
+RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)
+
#define RB_RED 0
#define RB_BLACK 1
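A hedged sketch instantiating the new RB_DECLARE_CALLBACKS_MAX() template, following the parameter list documented above; the node type, field names and scalar are illustrative (an interval-tree-style "max end in subtree").

/* Sketch: augment each node with the maximum 'last' value in its subtree. */
struct example_node {
        struct rb_node rb;
        unsigned long start;
        unsigned long last;             /* per-node scalar */
        unsigned long subtree_last;     /* max of 'last' over the subtree */
};

static inline unsigned long example_last(struct example_node *node)
{
        return node->last;
}

RB_DECLARE_CALLBACKS_MAX(static, example_cb,
                         struct example_node, rb,
                         unsigned long, subtree_last, example_last)

/* example_cb can then be passed to rb_insert_augmented()/rb_erase_augmented(). */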
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 563290fc194f..75c97e4bbc57 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -6,16 +6,11 @@
/*
* rcuwait provides a way of blocking and waking up a single
- * task in an rcu-safe manner; where it is forbidden to use
- * after exit_notify(). task_struct is not properly rcu protected,
- * unless dealing with rcu-aware lists, ie: find_task_by_*().
+ * task in an rcu-safe manner.
*
- * Alternatively we have task_rcu_dereference(), but the return
- * semantics have different implications which would break the
- * wakeup side. The only time @task is non-nil is when a user is
- * blocked (or checking if it needs to) on a condition, and reset
- * as soon as we know that the condition has succeeded and are
- * awoken.
+ * The only time @task is non-nil is when a user is blocked (or
+ * checking if it needs to) on a condition, and reset as soon as we
+ * know that the condition has succeeded and are awoken.
*/
struct rcuwait {
struct task_struct __rcu *task;
@@ -37,13 +32,6 @@ extern void rcuwait_wake_up(struct rcuwait *w);
*/
#define rcuwait_wait_event(w, condition) \
({ \
- /* \
- * Complain if we are called after do_exit()/exit_notify(), \
- * as we cannot rely on the rcu critical region for the \
- * wakeup side. \
- */ \
- WARN_ON(current->exit_state); \
- \
rcu_assign_pointer((w)->task, current); \
for (;;) { \
/* \
diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h
index bab671b0782f..4e78651371ba 100644
--- a/include/linux/root_dev.h
+++ b/include/linux/root_dev.h
@@ -8,6 +8,7 @@
enum {
Root_NFS = MKDEV(UNNAMED_MAJOR, 255),
+ Root_CIFS = MKDEV(UNNAMED_MAJOR, 254),
Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0),
Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1),
Root_FD0 = MKDEV(FLOPPY_MAJOR, 0),
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b75b28287005..2c2e56bd8913 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -958,6 +958,10 @@ struct task_struct {
struct mutex_waiter *blocked_on;
#endif
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ int non_block_count;
+#endif
+
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
@@ -1126,7 +1130,10 @@ struct task_struct {
struct tlbflush_unmap_batch tlb_ubc;
- struct rcu_head rcu;
+ union {
+ refcount_t rcu_users;
+ struct rcu_head rcu;
+ };
/* Cache last used pipe for splice(): */
struct pipe_inode_info *splice_pipe;
@@ -1835,7 +1842,10 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
* running or not.
*/
#ifndef vcpu_is_preempted
-# define vcpu_is_preempted(cpu) false
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return false;
+}
#endif
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 4a7944078cc3..e6770012db18 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -362,16 +362,16 @@ enum {
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
+ if (current->mm != mm)
+ return;
if (likely(!(atomic_read(&mm->membarrier_state) &
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
return;
sync_core_before_usermode();
}
-static inline void membarrier_execve(struct task_struct *t)
-{
- atomic_set(&t->mm->membarrier_state, 0);
-}
+extern void membarrier_exec_mmap(struct mm_struct *mm);
+
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
@@ -380,7 +380,7 @@ static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
{
}
#endif
-static inline void membarrier_execve(struct task_struct *t)
+static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 3d90ed8f75f0..4b1c3b664f51 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -119,7 +119,7 @@ static inline void put_task_struct(struct task_struct *t)
__put_task_struct(t);
}
-struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+void put_task_struct_rcu_user(struct task_struct *task);
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
diff --git a/include/linux/security.h b/include/linux/security.h
index 5f7441abbf42..a8d59d612d27 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -77,6 +77,54 @@ enum lsm_event {
LSM_POLICY_CHANGE,
};
+/*
+ * These are reasons that can be passed to the security_locked_down()
+ * LSM hook. Lockdown reasons that protect kernel integrity (ie, the
+ * ability for userland to modify kernel code) are placed before
+ * LOCKDOWN_INTEGRITY_MAX. Lockdown reasons that protect kernel
+ * confidentiality (ie, the ability for userland to extract
+ * information from the running kernel that would otherwise be
+ * restricted) are placed before LOCKDOWN_CONFIDENTIALITY_MAX.
+ *
+ * LSM authors should note that the semantics of any given lockdown
+ * reason are not guaranteed to be stable - the same reason may block
+ * one set of features in one kernel release, and a slightly different
+ * set of features in a later kernel release. LSMs that seek to expose
+ * lockdown policy at any level of granularity other than "none",
+ * "integrity" or "confidentiality" are responsible for either
+ * ensuring that they expose a consistent level of functionality to
+ * userland, or ensuring that userland is aware that this is
+ * potentially a moving target. It is easy to misuse this information
+ * in a way that could break userspace. Please be careful not to do
+ * so.
+ *
+ * If you add to this, remember to extend lockdown_reasons in
+ * security/lockdown/lockdown.c.
+ */
+enum lockdown_reason {
+ LOCKDOWN_NONE,
+ LOCKDOWN_MODULE_SIGNATURE,
+ LOCKDOWN_DEV_MEM,
+ LOCKDOWN_KEXEC,
+ LOCKDOWN_HIBERNATION,
+ LOCKDOWN_PCI_ACCESS,
+ LOCKDOWN_IOPORT,
+ LOCKDOWN_MSR,
+ LOCKDOWN_ACPI_TABLES,
+ LOCKDOWN_PCMCIA_CIS,
+ LOCKDOWN_TIOCSSERIAL,
+ LOCKDOWN_MODULE_PARAMETERS,
+ LOCKDOWN_MMIOTRACE,
+ LOCKDOWN_DEBUGFS,
+ LOCKDOWN_INTEGRITY_MAX,
+ LOCKDOWN_KCORE,
+ LOCKDOWN_KPROBES,
+ LOCKDOWN_BPF_READ,
+ LOCKDOWN_PERF,
+ LOCKDOWN_TRACEFS,
+ LOCKDOWN_CONFIDENTIALITY_MAX,
+};
+
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
int cap, unsigned int opts);
@@ -195,6 +243,7 @@ int unregister_blocking_lsm_notifier(struct notifier_block *nb);
/* prototypes */
extern int security_init(void);
+extern int early_security_init(void);
/* Security operations */
int security_binder_set_context_mgr(struct task_struct *mgr);
@@ -259,7 +308,8 @@ int security_dentry_create_files_as(struct dentry *dentry, int mode,
struct qstr *name,
const struct cred *old,
struct cred *new);
-
+int security_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type);
int security_inode_alloc(struct inode *inode);
void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
@@ -387,11 +437,11 @@ int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
void security_release_secctx(char *secdata, u32 seclen);
-
void security_inode_invalidate_secctx(struct inode *inode);
int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
+int security_locked_down(enum lockdown_reason what);
#else /* CONFIG_SECURITY */
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -423,6 +473,11 @@ static inline int security_init(void)
return 0;
}
+static inline int early_security_init(void)
+{
+ return 0;
+}
+
static inline int security_binder_set_context_mgr(struct task_struct *mgr)
{
return 0;
@@ -621,6 +676,12 @@ static inline int security_move_mount(const struct path *from_path,
return 0;
}
+static inline int security_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type)
+{
+ return 0;
+}
+
static inline int security_inode_alloc(struct inode *inode)
{
return 0;
@@ -1204,6 +1265,10 @@ static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32
{
return -EOPNOTSUPP;
}
+static inline int security_locked_down(enum lockdown_reason what)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#ifdef CONFIG_SECURITY_NETWORK
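A hedged sketch of a caller of the new security_locked_down() hook; the function and the chosen reason are illustrative.

/* Sketch: refuse a raw hardware access path when the kernel is locked down. */
static int example_open_raw_window(void)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_DEV_MEM);
        if (ret)
                return ret;     /* typically -EPERM under lockdown */

        /* ... proceed with the privileged access ... */
        return 0;
}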
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 20d815a33145..de8e4b71e3ba 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -49,8 +49,9 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
/*
* Functions in mm/shmem.c called directly from elsewhere:
*/
+extern const struct fs_parameter_description shmem_fs_parameters;
extern int shmem_init(void);
-extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
+extern int shmem_init_fs_context(struct fs_context *fc);
extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 9443cafd1969..0f80123650e2 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -69,7 +69,7 @@ struct shrinker {
/* These are for internal use */
struct list_head list;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
/* ID in shrinker_idr */
int id;
#endif
@@ -81,6 +81,11 @@ struct shrinker {
/* Flags */
#define SHRINKER_NUMA_AWARE (1 << 0)
#define SHRINKER_MEMCG_AWARE (1 << 1)
+/*
+ * For now this only makes sense when the shrinker is also MEMCG_AWARE;
+ * a non-MEMCG_AWARE shrinker should not have this flag set.
+ */
+#define SHRINKER_NONSLAB (1 << 2)
extern int prealloc_shrinker(struct shrinker *shrinker);
extern void register_shrinker_prepared(struct shrinker *shrinker);
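A hedged sketch of a shrinker carrying the new SHRINKER_NONSLAB flag alongside SHRINKER_MEMCG_AWARE, as the comment above requires; the callbacks are illustrative stubs.

/* Sketch: a memcg-aware, non-slab shrinker skeleton. */
static unsigned long example_count(struct shrinker *s, struct shrink_control *sc)
{
        return 0;               /* nothing reclaimable in this sketch */
}

static unsigned long example_scan(struct shrinker *s, struct shrink_control *sc)
{
        return SHRINK_STOP;
}

static struct shrinker example_shrinker = {
        .count_objects  = example_count,
        .scan_objects   = example_scan,
        .seeks          = DEFAULT_SEEKS,
        .flags          = SHRINKER_MEMCG_AWARE | SHRINKER_NONSLAB,
};
/* registered with register_shrinker(&example_shrinker) */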
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 56c9c7eed34e..ab2b98ad76e1 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -595,68 +595,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
return __kmalloc_node(size, flags, node);
}
-struct memcg_cache_array {
- struct rcu_head rcu;
- struct kmem_cache *entries[0];
-};
-
-/*
- * This is the main placeholder for memcg-related information in kmem caches.
- * Both the root cache and the child caches will have it. For the root cache,
- * this will hold a dynamically allocated array large enough to hold
- * information about the currently limited memcgs in the system. To allow the
- * array to be accessed without taking any locks, on relocation we free the old
- * version only after a grace period.
- *
- * Root and child caches hold different metadata.
- *
- * @root_cache: Common to root and child caches. NULL for root, pointer to
- * the root cache for children.
- *
- * The following fields are specific to root caches.
- *
- * @memcg_caches: kmemcg ID indexed table of child caches. This table is
- * used to index child cachces during allocation and cleared
- * early during shutdown.
- *
- * @root_caches_node: List node for slab_root_caches list.
- *
- * @children: List of all child caches. While the child caches are also
- * reachable through @memcg_caches, a child cache remains on
- * this list until it is actually destroyed.
- *
- * The following fields are specific to child caches.
- *
- * @memcg: Pointer to the memcg this cache belongs to.
- *
- * @children_node: List node for @root_cache->children list.
- *
- * @kmem_caches_node: List node for @memcg->kmem_caches list.
- */
-struct memcg_cache_params {
- struct kmem_cache *root_cache;
- union {
- struct {
- struct memcg_cache_array __rcu *memcg_caches;
- struct list_head __root_caches_node;
- struct list_head children;
- bool dying;
- };
- struct {
- struct mem_cgroup *memcg;
- struct list_head children_node;
- struct list_head kmem_caches_node;
- struct percpu_ref refcnt;
-
- void (*work_fn)(struct kmem_cache *);
- union {
- struct rcu_head rcu_head;
- struct work_struct work;
- };
- };
- };
-};
-
int memcg_update_all_caches(int num_memcgs);
/**
diff --git a/include/linux/soc/amlogic/meson-canvas.h b/include/linux/soc/amlogic/meson-canvas.h
index b4dde2fbeb3f..0cb2a6050d1f 100644
--- a/include/linux/soc/amlogic/meson-canvas.h
+++ b/include/linux/soc/amlogic/meson-canvas.h
@@ -20,6 +20,7 @@
#define MESON_CANVAS_ENDIAN_SWAP64 0x7
#define MESON_CANVAS_ENDIAN_SWAP128 0xf
+struct device;
struct meson_canvas;
/**
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index f3ae45d02e80..9618debb9ceb 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -13,9 +13,6 @@
#define CMDQ_NO_TIMEOUT 0xffffffffu
-/** cmdq event maximum */
-#define CMDQ_MAX_EVENT 0x3ff
-
struct cmdq_pkt;
struct cmdq_client {
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index bea46bd8b6ce..ea787201c3ac 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -4,6 +4,8 @@
#ifndef __SOUNDWIRE_H
#define __SOUNDWIRE_H
+#include <linux/mod_devicetable.h>
+
struct sdw_bus;
struct sdw_slave;
@@ -377,6 +379,8 @@ struct sdw_slave_prop {
* @dynamic_frame: Dynamic frame shape supported
* @err_threshold: Number of times that software may retry sending a single
* command
+ * @mclk_freq: clock reference passed to SoundWire Master, in Hz.
+ * @hw_disabled: if true, the Master is not functional, typically due to pin-mux configuration
*/
struct sdw_master_prop {
u32 revision;
@@ -391,6 +395,8 @@ struct sdw_master_prop {
u32 default_col;
bool dynamic_frame;
u32 err_threshold;
+ u32 mclk_freq;
+ bool hw_disabled;
};
int sdw_master_read_prop(struct sdw_bus *bus);
@@ -538,6 +544,7 @@ struct sdw_slave_ops {
* @bus: Bus handle
* @ops: Slave callback ops
* @prop: Slave properties
+ * @debugfs: Slave debugfs
* @node: node for bus list
* @port_ready: Port ready completion flag for each Slave port
* @dev_num: Device Number assigned by Bus
@@ -549,6 +556,9 @@ struct sdw_slave {
struct sdw_bus *bus;
const struct sdw_slave_ops *ops;
struct sdw_slave_prop prop;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
struct list_head node;
struct completion *port_ready;
u16 dev_num;
@@ -718,6 +728,7 @@ struct sdw_master_ops {
* Bit set implies used number, bit clear implies unused number.
* @bus_lock: bus lock
* @msg_lock: message lock
+ * @compute_params: points to Bus resource management implementation
* @ops: Master callback ops
* @port_ops: Master port callback ops
* @params: Current bus parameters
@@ -725,6 +736,7 @@ struct sdw_master_ops {
* @m_rt_list: List of Master instance of all stream(s) running on Bus. This
* is used to compute and program bus bandwidth, clock, frame shape,
* transport and port parameters
+ * @debugfs: Bus debugfs
* @defer_msg: Defer message
* @clk_stop_timeout: Clock stop timeout computed
* @bank_switch_timeout: Bank switch timeout computed
@@ -739,11 +751,15 @@ struct sdw_bus {
DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
struct mutex bus_lock;
struct mutex msg_lock;
+ int (*compute_params)(struct sdw_bus *bus);
const struct sdw_master_ops *ops;
const struct sdw_master_port_ops *port_ops;
struct sdw_bus_params params;
struct sdw_master_prop prop;
struct list_head m_rt_list;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
struct sdw_defer defer_msg;
unsigned int clk_stop_timeout;
u32 bank_switch_timeout;
@@ -828,7 +844,7 @@ struct sdw_stream_params {
* @m_rt_count: Count of Master runtime(s) in this stream
*/
struct sdw_stream_runtime {
- char *name;
+ const char *name;
struct sdw_stream_params params;
enum sdw_stream_state state;
enum sdw_stream_type type;
@@ -836,7 +852,7 @@ struct sdw_stream_runtime {
int m_rt_count;
};
-struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name);
+struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name);
void sdw_release_stream(struct sdw_stream_runtime *stream);
int sdw_stream_add_master(struct sdw_bus *bus,
struct sdw_stream_config *stream_config,
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 4d70da45363d..c9427cb6020b 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -8,6 +8,7 @@
* struct sdw_intel_ops: Intel audio driver callback ops
*
* @config_stream: configure the stream with the hw_params
+ * the first argument containing the context is mandatory
*/
struct sdw_intel_ops {
int (*config_stream)(void *arg, void *substream,
diff --git a/include/linux/string.h b/include/linux/string.h
index 4deb11f7976b..b2f9df7f0761 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -474,8 +474,9 @@ static inline void memcpy_and_pad(void *dest, size_t dest_len,
* But this can lead to bugs due to typos, or if prefix is a pointer
* and not a constant. Instead use str_has_prefix().
*
- * Returns: 0 if @str does not start with @prefix
- strlen(@prefix) if @str does start with @prefix
+ * Returns:
+ * * strlen(@prefix) if @str starts with @prefix
+ * * 0 if @str does not start with @prefix
*/
static __always_inline size_t str_has_prefix(const char *str, const char *prefix)
{
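A hedged usage sketch relying on the return convention documented above; the option prefix is illustrative.

/* Sketch: skip a known prefix and return the value part, or NULL. */
static const char *example_option_value(const char *str)
{
        size_t len = str_has_prefix(str, "mode=");

        if (!len)
                return NULL;            /* @str does not start with "mode=" */
        return str + len;               /* points just past "mode=" */
}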
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index c7f38e897174..f8603724fbee 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -87,6 +87,7 @@ struct cache_detail {
int has_died);
struct cache_head * (*alloc)(void);
+ void (*flush)(void);
int (*match)(struct cache_head *orig, struct cache_head *new);
void (*init)(struct cache_head *orig, struct cache_head *new);
void (*update)(struct cache_head *orig, struct cache_head *new);
@@ -107,9 +108,9 @@ struct cache_detail {
/* fields for communication over channel */
struct list_head queue;
- atomic_t readers; /* how many time is /chennel open */
- time_t last_close; /* if no readers, when did last close */
- time_t last_warn; /* when we last warned about no readers */
+ atomic_t writers; /* how many times /channel is open for writing */
+ time_t last_close; /* if no writers, when did last close */
+ time_t last_warn; /* when we last warned about no writers */
union {
struct proc_dir_entry *procfs;
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 27536b961552..a6ef35184ef1 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -242,9 +242,6 @@ void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue,
void rpc_sleep_on_priority(struct rpc_wait_queue *,
struct rpc_task *,
int priority);
-void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
- struct rpc_wait_queue *queue,
- struct rpc_task *task);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *,
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 981f0d726ad4..40f65888dd38 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -42,6 +42,7 @@
#ifndef SVC_RDMA_H
#define SVC_RDMA_H
+#include <linux/llist.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/rpc_rdma.h>
@@ -107,8 +108,7 @@ struct svcxprt_rdma {
struct list_head sc_read_complete_q;
struct work_struct sc_work;
- spinlock_t sc_recv_lock;
- struct list_head sc_recv_ctxts;
+ struct llist_head sc_recv_ctxts;
};
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
@@ -125,6 +125,7 @@ enum {
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
struct svc_rdma_recv_ctxt {
+ struct llist_node rc_node;
struct list_head rc_list;
struct ib_recv_wr rc_recv_wr;
struct ib_cqe rc_cqe;
@@ -200,7 +201,6 @@ extern struct svc_xprt_class svc_rdma_bc_class;
#endif
/* svc_rdma.c */
-extern struct workqueue_struct *svc_rdma_wq;
extern int svc_rdma_init(void);
extern void svc_rdma_cleanup(void);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 8a87d8bcb197..f33e5013bdfb 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -186,7 +186,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
-extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int);
+extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int);
extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 13e108bcc9eb..d783e15ba898 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -352,6 +352,7 @@ bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_request_enqueue_transmit(struct rpc_task *task);
void xprt_request_enqueue_receive(struct rpc_task *task);
void xprt_request_wait_receive(struct rpc_task *task);
+void xprt_request_dequeue_xprt(struct rpc_task *task);
bool xprt_request_need_retransmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
void xprt_end_transmit(struct rpc_task *task);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 86fc38ff0355..16c239e0d6dd 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -49,9 +49,9 @@
* fully-chunked NFS message (read chunks are the largest). Note only
* a single chunk type per message is supported currently.
*/
-#define RPCRDMA_MIN_SLOT_TABLE (2U)
+#define RPCRDMA_MIN_SLOT_TABLE (4U)
#define RPCRDMA_DEF_SLOT_TABLE (128U)
-#define RPCRDMA_MAX_SLOT_TABLE (256U)
+#define RPCRDMA_MAX_SLOT_TABLE (16384U)
#define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */
#define RPCRDMA_DEF_INLINE (4096) /* default inline thresh */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index de2c67a33b7e..063c0c1e112b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -340,6 +340,7 @@ extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
+extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
@@ -364,6 +365,7 @@ extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;
+extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 3e2a80cc7b56..96305a64a5a7 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -53,18 +53,4 @@ extern const struct blk_integrity_profile t10_pi_type1_ip;
extern const struct blk_integrity_profile t10_pi_type3_crc;
extern const struct blk_integrity_profile t10_pi_type3_ip;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-extern void t10_pi_prepare(struct request *rq, u8 protection_type);
-extern void t10_pi_complete(struct request *rq, u8 protection_type,
- unsigned int intervals);
-#else
-static inline void t10_pi_complete(struct request *rq, u8 protection_type,
- unsigned int intervals)
-{
-}
-static inline void t10_pi_prepare(struct request *rq, u8 protection_type)
-{
-}
-#endif
-
#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 8d8821b3689a..659a4400517b 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -134,7 +134,7 @@ static inline void copy_overflow(int size, unsigned long count)
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
-static __always_inline bool
+static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
int sz = __compiletime_object_size(addr);
diff --git a/include/linux/time64.h b/include/linux/time64.h
index a620ee610b9f..19125489ae94 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -30,6 +30,8 @@ struct itimerspec64 {
/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX ((s64)~((u64)1 << 63))
+#define TIME64_MIN (-TIME64_MAX - 1)
+
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 34a038563d97..70bbdc38dc37 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -55,7 +55,7 @@
* as usual) and both source and destination can trigger faults.
*/
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
@@ -63,7 +63,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return raw_copy_from_user(to, from, n);
}
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
@@ -85,7 +85,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
* The caller should also make sure he pins the user space address
* so that we don't result in page fault and sleep.
*/
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
@@ -93,7 +93,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
return raw_copy_to_user(to, from, n);
}
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -103,7 +103,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
}
#ifdef INLINE_COPY_FROM_USER
-static inline unsigned long
+static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
@@ -117,12 +117,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
return res;
}
#else
-extern unsigned long
+extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
#ifdef INLINE_COPY_TO_USER
-static inline unsigned long
+static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -133,7 +133,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
return n;
}
#else
-extern unsigned long
+extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
@@ -222,8 +222,9 @@ static inline bool pagefault_disabled(void)
#ifndef ARCH_HAS_NOCACHE_UACCESS
-static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
- const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
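With __must_check on every variant, callers can no longer silently discard the count of bytes that failed to copy. A minimal sketch of the resulting caller idiom (the helper name is hypothetical, not part of this header):

static int example_fetch_args(void *dst, const void __user *src, size_t len)
{
	/* copy_from_user() returns the number of bytes it could NOT copy */
	if (copy_from_user(dst, src, len))
		return -EFAULT;

	return 0;
}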
diff --git a/include/linux/verification.h b/include/linux/verification.h
index 32d990d163c4..911ab7c2b1ab 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -32,6 +32,7 @@ extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
struct key;
+struct pkcs7_message;
extern int verify_pkcs7_signature(const void *data, size_t len,
const void *raw_pkcs7, size_t pkcs7_len,
@@ -41,6 +42,15 @@ extern int verify_pkcs7_signature(const void *data, size_t len,
const void *data, size_t len,
size_t asn1hdrlen),
void *ctx);
+extern int verify_pkcs7_message_sig(const void *data, size_t len,
+ struct pkcs7_message *pkcs7,
+ struct key *trusted_keys,
+ enum key_being_used_for usage,
+ int (*view_content)(void *ctx,
+ const void *data,
+ size_t len,
+ size_t asn1hdrlen),
+ void *ctx);
#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index bae807eb2933..9aced11e9000 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -9,6 +9,8 @@
#endif
#ifdef CONFIG_PREEMPT
#define MODULE_VERMAGIC_PREEMPT "preempt "
+#elif defined(CONFIG_PREEMPT_RT)
+#define MODULE_VERMAGIC_PREEMPT "preempt_rt "
#else
#define MODULE_VERMAGIC_PREEMPT ""
#endif
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9b21d0047710..4e7809408073 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -18,6 +18,7 @@ struct notifier_block; /* in notifier.h */
#define VM_ALLOC 0x00000002 /* vmalloc() */
#define VM_MAP 0x00000004 /* vmap()ed pages */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
+#define VM_DMA_COHERENT 0x00000010 /* dma_alloc_coherent */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
@@ -26,6 +27,7 @@ struct notifier_block; /* in notifier.h */
* vfree_atomic().
*/
#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */
+
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -51,15 +53,21 @@ struct vmap_area {
unsigned long va_start;
unsigned long va_end;
- /*
- * Largest available free size in subtree.
- */
- unsigned long subtree_max_size;
- unsigned long flags;
struct rb_node rb_node; /* address sorted rbtree */
struct list_head list; /* address sorted list */
- struct llist_node purge_list; /* "lazy purge" list */
- struct vm_struct *vm;
+
+ /*
+ * The following three fields can be packed, because
+ * a vmap_area object is always in one of three states:
+ * 1) in the "free" tree (root is free_vmap_area_root)
+ * 2) in the "busy" tree (root is vmap_area_root)
+ * 3) in the purge list (head is vmap_purge_list)
+ */
+ union {
+ unsigned long subtree_max_size; /* in "free" tree */
+ struct vm_struct *vm; /* in "busy" tree */
+ struct llist_node purge_list; /* in purge list */
+ };
};
/*
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 7238865e75b0..51bf43076165 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -46,6 +46,8 @@ const char *zpool_get_type(struct zpool *pool);
void zpool_destroy_pool(struct zpool *pool);
+bool zpool_malloc_support_movable(struct zpool *pool);
+
int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
unsigned long *handle);
@@ -90,6 +92,7 @@ struct zpool_driver {
struct zpool *zpool);
void (*destroy)(void *pool);
+ bool malloc_support_movable;
int (*malloc)(void *pool, size_t size, gfp_t gfp,
unsigned long *handle);
void (*free)(void *pool, unsigned long handle);
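The malloc_support_movable driver flag and its zpool_malloc_support_movable() accessor let a zpool user decide whether it may hand movable highmem pages to the backend. A hedged caller sketch in the style of a compressed-swap user (symbols outside zpool.h are ordinary kernel ones; the function itself is hypothetical):

static unsigned long example_zpool_store(struct zpool *pool, size_t len)
{
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	unsigned long handle;

	if (zpool_malloc_support_movable(pool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;	/* backend can cope with movable pages */

	if (zpool_malloc(pool, len, gfp, &handle))	/* returns 0 on success */
		return 0;

	return handle;
}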
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index 4f385ec54f80..fe2fc9e91588 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -36,6 +36,8 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cred.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
struct ib_addr {
union {
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 1052d0d62be7..a91b2af64ec4 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -42,7 +42,7 @@ struct ib_ucontext;
struct ib_umem_odp;
struct ib_umem {
- struct ib_ucontext *context;
+ struct ib_device *ibdev;
struct mm_struct *owning_mm;
size_t length;
unsigned long address;
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 479db5c98ff6..253df1a1fa54 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -37,11 +37,6 @@
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>
-struct umem_odp_node {
- u64 __subtree_last;
- struct rb_node rb;
-};
-
struct ib_umem_odp {
struct ib_umem umem;
struct ib_ucontext_per_mm *per_mm;
@@ -72,7 +67,15 @@ struct ib_umem_odp {
int npages;
/* Tree tracking */
- struct umem_odp_node interval_tree;
+ struct interval_tree_node interval_tree;
+
+ /*
+ * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
+ * only as an anchor for the driver to hold onto the per_mm. FIXME:
+ * This should be removed and drivers should work with the per_mm
+ * directly.
+ */
+ bool is_implicit_odp;
struct completion notifier_completion;
int dying;
@@ -88,14 +91,13 @@ static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
/* Returns the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
- return ALIGN_DOWN(umem_odp->umem.address, 1UL << umem_odp->page_shift);
+ return umem_odp->interval_tree.start;
}
/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
- return ALIGN(umem_odp->umem.address + umem_odp->umem.length,
- 1UL << umem_odp->page_shift);
+ return umem_odp->interval_tree.last + 1;
}
static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
@@ -120,25 +122,20 @@ static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_ucontext_per_mm {
- struct ib_ucontext *context;
- struct mm_struct *mm;
+ struct mmu_notifier mn;
struct pid *tgid;
- bool active;
struct rb_root_cached umem_tree;
/* Protects umem_tree */
struct rw_semaphore umem_rwsem;
-
- struct mmu_notifier mn;
- unsigned int odp_mrs_count;
-
- struct list_head ucontext_list;
- struct rcu_head rcu;
};
-int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
-struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root_umem,
- unsigned long addr, size_t size);
+struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
+ size_t size, int access);
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+ int access);
+struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem,
+ unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
@@ -163,8 +160,17 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
* Find first region intersecting with address range.
* Return NULL if not found
*/
-struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
- u64 addr, u64 length);
+static inline struct ib_umem_odp *
+rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length)
+{
+ struct interval_tree_node *node;
+
+ node = interval_tree_iter_first(root, addr, addr + length - 1);
+ if (!node)
+ return NULL;
+ return container_of(node, struct ib_umem_odp, interval_tree);
+
+}
static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
unsigned long mmu_seq)
@@ -185,9 +191,11 @@ static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
+static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata,
+ unsigned long addr,
+ size_t size, int access)
{
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 4f225175cb91..6a47ba85c54c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -98,15 +98,54 @@ void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
#if defined(CONFIG_DYNAMIC_DEBUG)
#define ibdev_dbg(__dev, format, args...) \
dynamic_ibdev_dbg(__dev, format, ##args)
-#elif defined(DEBUG)
-#define ibdev_dbg(__dev, format, args...) \
- ibdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif
+#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_err_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_info_ratelimited(ibdev, fmt, ...) \
+ ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
+ __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+#else
+__printf(2, 3) __cold
+static inline
+void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
+#endif
+
union ib_gid {
u8 raw[16];
struct {
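The new ratelimited wrappers are drop-in replacements for the plain ibdev_*() helpers in paths that can fire at line rate; each call site gets its own DEFINE_RATELIMIT_STATE with the default interval and burst. A hedged usage sketch (the surrounding function is hypothetical):

static void example_handle_bad_mad(struct ib_device *ibdev, u32 port_num)
{
	/* prints at most DEFAULT_RATELIMIT_BURST times per DEFAULT_RATELIMIT_INTERVAL */
	ibdev_warn_ratelimited(ibdev, "port %u: dropping malformed MAD\n", port_num);
	ibdev_dbg_ratelimited(ibdev, "port %u: malformed MAD contents suppressed\n", port_num);
}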
@@ -451,6 +490,16 @@ enum ib_port_state {
IB_PORT_ACTIVE_DEFER = 5
};
+enum ib_port_phys_state {
+ IB_PORT_PHYS_STATE_SLEEP = 1,
+ IB_PORT_PHYS_STATE_POLLING = 2,
+ IB_PORT_PHYS_STATE_DISABLED = 3,
+ IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
+ IB_PORT_PHYS_STATE_LINK_UP = 5,
+ IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
+ IB_PORT_PHYS_STATE_PHY_TEST = 7,
+};
+
enum ib_port_width {
IB_WIDTH_1X = 1,
IB_WIDTH_2X = 16,
@@ -1417,11 +1466,6 @@ struct ib_ucontext {
bool cleanup_retryable;
- void (*invalidate_range)(struct ib_umem_odp *umem_odp,
- unsigned long start, unsigned long end);
- struct mutex per_mm_list_lock;
- struct list_head per_mm_list;
-
struct ib_rdmacg_object cg_obj;
/*
* Implementation details of the RDMA core, don't use in drivers:
@@ -2378,6 +2422,8 @@ struct ib_device_ops {
u64 iova);
int (*unmap_fmr)(struct list_head *fmr_list);
int (*dealloc_fmr)(struct ib_fmr *fmr);
+ void (*invalidate_range)(struct ib_umem_odp *umem_odp,
+ unsigned long start, unsigned long end);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
@@ -3713,6 +3759,25 @@ static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
NULL);
}
+struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+ int nr_cqe, enum ib_poll_context poll_ctx,
+ const char *caller);
+
+/**
+ * ib_alloc_cq_any: Allocate kernel CQ
+ * @dev: The IB device
+ * @private: Private data attached to the CQE
+ * @nr_cqe: Number of CQEs in the CQ
+ * @poll_ctx: Context used for polling the CQ
+ */
+static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
+ void *private, int nr_cqe,
+ enum ib_poll_context poll_ctx)
+{
+ return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
+ KBUILD_MODNAME);
+}
+
/**
* ib_free_cq_user - Free kernel/user CQ
* @cq: The CQ to free
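ib_alloc_cq_any() is for ULPs that do not care which completion vector their CQ lands on; the core picks one itself and tags the allocation with KBUILD_MODNAME. A hedged usage sketch (error handling trimmed, the wrapper function is hypothetical):

static struct ib_cq *example_create_cq(struct ib_device *dev, void *priv)
{
	struct ib_cq *cq;

	cq = ib_alloc_cq_any(dev, priv, 128, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return NULL;	/* callers that need the errno can propagate cq instead */

	return cq;
}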
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h
index b9fee7feeeb5..c89535047c42 100644
--- a/include/rdma/iw_portmap.h
+++ b/include/rdma/iw_portmap.h
@@ -33,6 +33,9 @@
#ifndef _IW_PORTMAP_H
#define _IW_PORTMAP_H
+#include <linux/socket.h>
+#include <linux/netlink.h>
+
#define IWPM_ULIBNAME_SIZE 32
#define IWPM_DEVNAME_SIZE 32
#define IWPM_IFNAME_SIZE 16
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index 7147a9263011..bdbfe25d3854 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -33,6 +33,8 @@
#if !defined(OPA_PORT_INFO_H)
#define OPA_PORT_INFO_H
+#include <rdma/opa_smi.h>
+
#define OPA_PORT_LINK_MODE_NOP 0 /* No change */
#define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 6631624e4d7c..ab22759de7ea 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -76,28 +76,32 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
/**
* Send the supplied skb to a specific userspace PID.
+ * @net: Net namespace in which to send the skb
* @skb: The netlink skb
* @pid: Userspace netlink process ID
* Returns 0 on success or a negative error code.
*/
-int rdma_nl_unicast(struct sk_buff *skb, u32 pid);
+int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid);
/**
* Send, with wait/1 retry, the supplied skb to a specific userspace PID.
+ * @net: Net namespace in which to send the skb
* @skb: The netlink skb
* @pid: Userspace netlink process ID
* Returns 0 on success or a negative error code.
*/
-int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid);
+int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid);
/**
* Send the supplied skb to a netlink group.
+ * @net: Net namespace in which to send the skb
* @skb: The netlink skb
* @group: Netlink group ID
* @flags: allocation flags
* Returns 0 on success or a negative error code.
*/
-int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags);
+int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
+ unsigned int group, gfp_t flags);
/**
* Check if there are any listeners to the netlink group
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 525848e227dc..ac5a9430abb6 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -116,6 +116,7 @@ struct rvt_ibport {
u64 n_unaligned;
u64 n_rc_dupreq;
u64 n_rc_seqnak;
+ u64 n_rc_crwaits;
u16 pkey_violations;
u16 qkey_violations;
u16 mkey_violations;
diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h
index 04c519ef6d71..574eb7278f46 100644
--- a/include/rdma/rdmavt_cq.h
+++ b/include/rdma/rdmavt_cq.h
@@ -53,6 +53,7 @@
#include <linux/kthread.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
/*
* Define an ib_cq_notify value that is not valid so we know when CQ
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index e06c77d76463..b550ae89bf85 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -973,6 +973,41 @@ static inline void rvt_free_rq(struct rvt_rq *rq)
rq->wq = NULL;
}
+/**
+ * rvt_to_iport - Get the ibport pointer
+ * @qp: the qp pointer
+ *
+ * This function returns the ibport pointer from the qp pointer.
+ */
+static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ return rdi->ports[qp->port_num - 1];
+}
+
+/**
+ * rvt_rc_credit_avail - Check if there are enough RC credits for the request
+ * @qp: the qp
+ * @wqe: the request
+ *
+ * This function returns false when there are not enough credits for the given
+ * request and true otherwise.
+ */
+static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ lockdep_assert_held(&qp->s_lock);
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
+ rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
+ struct rvt_ibport *rvp = rvt_to_iport(qp);
+
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+ rvp->n_rc_crwaits++;
+ return false;
+ }
+ return true;
+}
+
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
u64 v,
void (*cb)(struct rvt_qp *qp, u64 v));
diff --git a/include/rdma/signature.h b/include/rdma/signature.h
index f24cc2a1d3c5..d16b0fcc8344 100644
--- a/include/rdma/signature.h
+++ b/include/rdma/signature.h
@@ -6,6 +6,8 @@
#ifndef _RDMA_SIGNATURE_H_
#define _RDMA_SIGNATURE_H_
+#include <linux/types.h>
+
enum ib_signature_prot_cap {
IB_PROT_T10DIF_TYPE_1 = 1,
IB_PROT_T10DIF_TYPE_2 = 1 << 1,
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 76ed5e4acd38..91bd749a02f7 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -57,6 +57,7 @@ struct scsi_pointer {
#define SCMD_TAGGED (1 << 0)
#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
#define SCMD_INITIALIZED (1 << 2)
+#define SCMD_LAST (1 << 3)
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index e03bd9d41fa8..7b196d234626 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -6,8 +6,6 @@ struct scsi_cmnd;
struct scsi_device;
struct scsi_sense_hdr;
-#define SCSI_LOG_BUFSIZE 128
-
extern void scsi_print_command(struct scsi_cmnd *);
extern size_t __scsi_format_command(char *, size_t,
const unsigned char *, size_t);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index cc139dbd71e5..31e0d6ca1eba 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -80,8 +80,10 @@ struct scsi_host_template {
* command block to the LLDD. When the driver finished
* processing the command the done callback is invoked.
*
- * If queuecommand returns 0, then the HBA has accepted the
- * command. The done() function must be called on the command
+ * If queuecommand returns 0, then the driver has accepted the
+ * command. It must also push it to the HBA if the scsi_cmnd
+ * flag SCMD_LAST is set, or if the driver does not implement
+ * commit_rqs. The done() function must be called on the command
* when the driver has finished with it. (you may call done on the
* command before queuecommand returns, but in this case you
* *must* return 0 from queuecommand).
@@ -110,6 +112,16 @@ struct scsi_host_template {
int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
/*
+ * The commit_rqs function is used to trigger a hardware
+ * doorbell after some requests have been queued with
+ * queuecommand, when an error was encountered before the
+ * request carrying SCMD_LAST could be sent.
+ *
+ * STATUS: OPTIONAL
+ */
+ void (*commit_rqs)(struct Scsi_Host *, u16);
+
+ /*
* This is an error handling strategy routine. You don't need to
* define one of these if you don't want to - there is a default
* routine that is present that should work in most cases. For those
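Taken together, queuecommand() and the new commit_rqs() give a low-level driver the batching contract described above: ring the doorbell for the command flagged SCMD_LAST, and rely on commit_rqs() to flush a batch that was cut short. A hedged driver-side sketch (all example_* names are hypothetical):

static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct example_hba *hba = shost_priv(shost);

	example_post_to_ring(hba, cmd);		/* queue without notifying hardware yet */
	if (cmd->flags & SCMD_LAST)
		example_ring_doorbell(hba);	/* last command of this batch */

	return 0;
}

static void example_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
	/* flush whatever was queued when no SCMD_LAST command arrived */
	example_ring_doorbell(shost_priv(shost));
}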
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index f6a4eaa85a3e..a13830616107 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -451,20 +451,81 @@ TRACE_EVENT(xprtrdma_createmrs,
TP_STRUCT__entry(
__field(const void *, r_xprt)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
__field(unsigned int, count)
),
TP_fast_assign(
__entry->r_xprt = r_xprt;
__entry->count = count;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("r_xprt=%p: created %u MRs",
- __entry->r_xprt, __entry->count
+ TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __entry->count
)
);
-DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
+TRACE_EVENT(xprtrdma_mr_get,
+ TP_PROTO(
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ const struct rpc_rqst *rqst = &req->rl_slot;
+
+ __entry->req = req;
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x req=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->req
+ )
+);
+
+TRACE_EVENT(xprtrdma_nomrs,
+ TP_PROTO(
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ const struct rpc_rqst *rqst = &req->rl_slot;
+
+ __entry->req = req;
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x req=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->req
+ )
+);
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
@@ -623,21 +684,21 @@ TRACE_EVENT(xprtrdma_post_send,
TRACE_EVENT(xprtrdma_post_recv,
TP_PROTO(
- const struct ib_cqe *cqe
+ const struct rpcrdma_rep *rep
),
- TP_ARGS(cqe),
+ TP_ARGS(rep),
TP_STRUCT__entry(
- __field(const void *, cqe)
+ __field(const void *, rep)
),
TP_fast_assign(
- __entry->cqe = cqe;
+ __entry->rep = rep;
),
- TP_printk("cqe=%p",
- __entry->cqe
+ TP_printk("rep=%p",
+ __entry->rep
)
);
@@ -715,14 +776,15 @@ TRACE_EVENT(xprtrdma_wc_receive,
TP_ARGS(wc),
TP_STRUCT__entry(
- __field(const void *, cqe)
+ __field(const void *, rep)
__field(u32, byte_len)
__field(unsigned int, status)
__field(u32, vendor_err)
),
TP_fast_assign(
- __entry->cqe = wc->wr_cqe;
+ __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
+ rr_cqe);
__entry->status = wc->status;
if (wc->status) {
__entry->byte_len = 0;
@@ -733,8 +795,8 @@ TRACE_EVENT(xprtrdma_wc_receive,
}
),
- TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
- __entry->cqe, __entry->byte_len,
+ TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
+ __entry->rep, __entry->byte_len,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 3a27335fce2c..c2ce6480b4b1 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -66,8 +66,9 @@ DECLARE_EVENT_CLASS(writeback_page_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
+ 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
@@ -110,8 +111,8 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
struct backing_dev_info *bdi = inode_to_bdi(inode);
/* may be called for files on pseudo FSes w/ unregistered bdi */
- strncpy(__entry->name,
- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->flags = flags;
@@ -316,8 +317,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
@@ -360,8 +361,9 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name,
- wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ wb->bdi->dev ? dev_name(wb->bdi->dev) :
+ "(unknown)", 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
@@ -414,7 +416,7 @@ DECLARE_EVENT_CLASS(writeback_class,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: cgroup_ino=%u",
@@ -436,7 +438,7 @@ TRACE_EVENT(writeback_bdi_register,
__array(char, name, 32)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
),
TP_printk("bdi %s",
__entry->name
@@ -461,7 +463,7 @@ DECLARE_EVENT_CLASS(wbc_class,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->sync_mode = wbc->sync_mode;
@@ -512,7 +514,7 @@ TRACE_EVENT(writeback_queue_io,
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
__entry->older = older_than_this ? *older_than_this : 0;
__entry->age = older_than_this ?
(jiffies - *older_than_this) * 1000 / HZ : -1;
@@ -598,7 +600,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
),
TP_fast_assign(
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
__entry->write_bw = KBps(wb->write_bandwidth);
__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
@@ -663,7 +665,7 @@ TRACE_EVENT(balance_dirty_pages,
TP_fast_assign(
unsigned long freerun = (thresh + bg_thresh) / 2;
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
__entry->limit = global_wb_domain.dirty_limit;
__entry->setpoint = (global_wb_domain.dirty_limit +
@@ -723,8 +725,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
@@ -797,8 +799,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
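All of the conversions in this file follow one pattern: strncpy() leaves the destination unterminated when the source fills the whole 32-byte field, while strscpy_pad() always NUL-terminates and zero-fills the tail, so fixed-size trace fields stay safe to print and free of stale bytes. The same pattern outside a TP_fast_assign() block would look like this (function name hypothetical):

static void example_record_bdi_name(char dst[32], struct backing_dev_info *bdi)
{
	/* truncates safely and pads the rest of dst with NULs */
	strscpy_pad(dst, bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
}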
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 63b1f506ea67..c160a5354eb6 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -67,6 +67,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
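MADV_COLD and MADV_PAGEOUT are the userspace-visible side of the deactivate_page()/reclaim_pages() additions earlier in this diff: COLD only demotes the pages, PAGEOUT asks for immediate reclaim. A hedged userspace sketch (both hints are advisory and fail with EINVAL on kernels or libc headers that predate them):

#include <sys/mman.h>

static void example_shrink_caches(void *warm_buf, void *idle_buf, size_t len)
{
	madvise(warm_buf, len, MADV_COLD);	/* demote to the inactive list */
	madvise(idle_buf, len, MADV_PAGEOUT);	/* reclaim right away */
}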
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 11cc57322962..c99b4f2482c6 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -128,6 +128,10 @@ extern "C" {
* for the second page onward should be set to NC.
*/
#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8)
+/* Flag that BO may contain sensitive data that must be wiped before
+ * releasing the memory
+ */
+#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 5ab331e5dc23..735c8cfdaaa1 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -361,6 +361,7 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_DSI 16
#define DRM_MODE_CONNECTOR_DPI 17
#define DRM_MODE_CONNECTOR_WRITEBACK 18
+#define DRM_MODE_CONNECTOR_SPI 19
struct drm_mode_get_connector {
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index 0d5c49dc478c..09d0df8b71c5 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -73,6 +73,7 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
+#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b
#define ETNA_MAX_PIPES 4
@@ -148,6 +149,11 @@ struct drm_etnaviv_gem_submit_reloc {
* then patching the cmdstream for this entry is skipped. This can
* avoid kernel needing to map/access the cmdstream bo in the common
* case.
+ * If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN) the 'presumed'
+ * field is interpreted as the fixed location to map the bo into the gpu
+ * virtual address space. If the kernel is unable to map the buffer at
+ * this location the submit will fail. This means userspace is responsible
+ * for managing the whole gpu virtual address space.
*/
#define ETNA_SUBMIT_BO_READ 0x0001
#define ETNA_SUBMIT_BO_WRITE 0x0002
@@ -177,9 +183,11 @@ struct drm_etnaviv_gem_submit_pmr {
#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
+#define ETNA_SUBMIT_SOFTPIN 0x0008
#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
ETNA_SUBMIT_FENCE_FD_IN | \
- ETNA_SUBMIT_FENCE_FD_OUT)
+ ETNA_SUBMIT_FENCE_FD_OUT| \
+ ETNA_SUBMIT_SOFTPIN)
#define ETNA_PIPE_3D 0x00
#define ETNA_PIPE_2D 0x01
#define ETNA_PIPE_VG 0x02
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 328d05e77d9f..469dc512cca3 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -521,6 +521,7 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
+#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
#define I915_PARAM_HUC_STATUS 42
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index b5d370638846..ec19db1eead8 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -20,6 +20,7 @@ extern "C" {
#define DRM_PANFROST_GET_BO_OFFSET 0x05
#define DRM_PANFROST_PERFCNT_ENABLE 0x06
#define DRM_PANFROST_PERFCNT_DUMP 0x07
+#define DRM_PANFROST_MADVISE 0x08
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -27,6 +28,7 @@ extern "C" {
#define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
+#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -82,6 +84,9 @@ struct drm_panfrost_wait_bo {
__s64 timeout_ns; /* absolute */
};
+#define PANFROST_BO_NOEXEC 1
+#define PANFROST_BO_HEAP 2
+
/**
* struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
*
@@ -127,6 +132,45 @@ struct drm_panfrost_mmap_bo {
enum drm_panfrost_param {
DRM_PANFROST_PARAM_GPU_PROD_ID,
+ DRM_PANFROST_PARAM_GPU_REVISION,
+ DRM_PANFROST_PARAM_SHADER_PRESENT,
+ DRM_PANFROST_PARAM_TILER_PRESENT,
+ DRM_PANFROST_PARAM_L2_PRESENT,
+ DRM_PANFROST_PARAM_STACK_PRESENT,
+ DRM_PANFROST_PARAM_AS_PRESENT,
+ DRM_PANFROST_PARAM_JS_PRESENT,
+ DRM_PANFROST_PARAM_L2_FEATURES,
+ DRM_PANFROST_PARAM_CORE_FEATURES,
+ DRM_PANFROST_PARAM_TILER_FEATURES,
+ DRM_PANFROST_PARAM_MEM_FEATURES,
+ DRM_PANFROST_PARAM_MMU_FEATURES,
+ DRM_PANFROST_PARAM_THREAD_FEATURES,
+ DRM_PANFROST_PARAM_MAX_THREADS,
+ DRM_PANFROST_PARAM_THREAD_MAX_WORKGROUP_SZ,
+ DRM_PANFROST_PARAM_THREAD_MAX_BARRIER_SZ,
+ DRM_PANFROST_PARAM_COHERENCY_FEATURES,
+ DRM_PANFROST_PARAM_TEXTURE_FEATURES0,
+ DRM_PANFROST_PARAM_TEXTURE_FEATURES1,
+ DRM_PANFROST_PARAM_TEXTURE_FEATURES2,
+ DRM_PANFROST_PARAM_TEXTURE_FEATURES3,
+ DRM_PANFROST_PARAM_JS_FEATURES0,
+ DRM_PANFROST_PARAM_JS_FEATURES1,
+ DRM_PANFROST_PARAM_JS_FEATURES2,
+ DRM_PANFROST_PARAM_JS_FEATURES3,
+ DRM_PANFROST_PARAM_JS_FEATURES4,
+ DRM_PANFROST_PARAM_JS_FEATURES5,
+ DRM_PANFROST_PARAM_JS_FEATURES6,
+ DRM_PANFROST_PARAM_JS_FEATURES7,
+ DRM_PANFROST_PARAM_JS_FEATURES8,
+ DRM_PANFROST_PARAM_JS_FEATURES9,
+ DRM_PANFROST_PARAM_JS_FEATURES10,
+ DRM_PANFROST_PARAM_JS_FEATURES11,
+ DRM_PANFROST_PARAM_JS_FEATURES12,
+ DRM_PANFROST_PARAM_JS_FEATURES13,
+ DRM_PANFROST_PARAM_JS_FEATURES14,
+ DRM_PANFROST_PARAM_JS_FEATURES15,
+ DRM_PANFROST_PARAM_NR_CORE_GROUPS,
+ DRM_PANFROST_PARAM_THREAD_TLS_ALLOC,
};
struct drm_panfrost_get_param {
@@ -159,6 +203,26 @@ struct drm_panfrost_perfcnt_dump {
__u64 buf_ptr;
};
+/* madvise provides a way to tell the kernel that a buffer's contents
+ * can be discarded under memory pressure, which is useful for a userspace
+ * bo cache where we want to optimistically hold on to the buffer allocation
+ * and potential mmap, but allow the pages to be discarded under memory
+ * pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when buffer enters BO
+ * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
+ * In the WILLNEED case, 'retained' indicates to userspace whether the
+ * backing pages still exist.
+ */
+#define PANFROST_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
+#define PANFROST_MADV_DONTNEED 1 /* backing pages not needed */
+
+struct drm_panfrost_madvise {
+ __u32 handle; /* in, GEM handle */
+ __u32 madv; /* in, PANFROST_MADV_x */
+ __u32 retained; /* out, whether backing store still exists */
+};
+
#if defined(__cplusplus)
}
#endif
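The madvise ioctl is typically driven from a userspace BO cache exactly as the comment above describes. A hedged sketch of that round trip, assuming the declarations above are in scope (file-descriptor setup and error handling elided; the function name is hypothetical):

#include <sys/ioctl.h>

/* Returns non-zero if the BO's pages survived the DONTNEED/WILLNEED cycle. */
static __u32 example_bo_recycle(int drm_fd, __u32 bo_handle)
{
	struct drm_panfrost_madvise args = { .handle = bo_handle };

	args.madv = PANFROST_MADV_DONTNEED;		/* BO parked in the userspace cache */
	ioctl(drm_fd, DRM_IOCTL_PANFROST_MADVISE, &args);

	args.madv = PANFROST_MADV_WILLNEED;		/* pulling it back out of the cache */
	if (ioctl(drm_fd, DRM_IOCTL_PANFROST_MADVISE, &args))
		return 0;

	return args.retained;				/* 0 => backing pages were purged */
}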
diff --git a/include/uapi/linux/coff.h b/include/uapi/linux/coff.h
index e4a79f80b9a0..ab5c7e847eed 100644
--- a/include/uapi/linux/coff.h
+++ b/include/uapi/linux/coff.h
@@ -11,6 +11,9 @@
more information about COFF, then O'Reilly has a very excellent book.
*/
+#ifndef _UAPI_LINUX_COFF_H
+#define _UAPI_LINUX_COFF_H
+
#define E_SYMNMLEN 8 /* Number of characters in a symbol name */
#define E_FILNMLEN 14 /* Number of characters in a file name */
#define E_DIMNUM 4 /* Number of array dimensions in auxiliary entry */
@@ -350,3 +353,5 @@ struct COFF_reloc {
/* For new sections we haven't heard of before */
#define COFF_DEF_SECTION_ALIGNMENT 4
+
+#endif /* _UAPI_LINUX_COFF_H */
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index f396a82dfd3e..2df8ceca1f9b 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -243,6 +243,7 @@ enum {
DM_TARGET_MSG_CMD,
DM_DEV_SET_GEOMETRY_CMD,
DM_DEV_ARM_POLL_CMD,
+ DM_GET_TARGET_VERSION_CMD,
};
#define DM_IOCTL 0xfd
@@ -265,14 +266,15 @@ enum {
#define DM_TABLE_STATUS _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, struct dm_ioctl)
#define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl)
+#define DM_GET_TARGET_VERSION _IOWR(DM_IOCTL, DM_GET_TARGET_VERSION_CMD, struct dm_ioctl)
#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl)
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 40
+#define DM_VERSION_MINOR 41
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2019-01-18)"
+#define DM_VERSION_EXTRA "-ioctl (2019-09-16)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index aad225b05be7..379a612f8f1d 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -264,6 +264,7 @@ struct fsxattr {
#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
#define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */
#define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
+#define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */
#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 2971d29a42e4..802b0377a49e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -133,6 +133,8 @@
*
* 7.31
* - add FUSE_WRITE_KILL_PRIV flag
+ * - add FUSE_SETUPMAPPING and FUSE_REMOVEMAPPING
+ * - add map_alignment to fuse_init_out, add FUSE_MAP_ALIGNMENT flag
*/
#ifndef _LINUX_FUSE_H
@@ -274,6 +276,7 @@ struct fuse_file_lock {
* FUSE_CACHE_SYMLINKS: cache READLINK responses
* FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir
* FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request
+ * FUSE_MAP_ALIGNMENT: map_alignment field is valid
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -301,6 +304,7 @@ struct fuse_file_lock {
#define FUSE_CACHE_SYMLINKS (1 << 23)
#define FUSE_NO_OPENDIR_SUPPORT (1 << 24)
#define FUSE_EXPLICIT_INVAL_DATA (1 << 25)
+#define FUSE_MAP_ALIGNMENT (1 << 26)
/**
* CUSE INIT request/reply flags
@@ -422,9 +426,15 @@ enum fuse_opcode {
FUSE_RENAME2 = 45,
FUSE_LSEEK = 46,
FUSE_COPY_FILE_RANGE = 47,
+ FUSE_SETUPMAPPING = 48,
+ FUSE_REMOVEMAPPING = 49,
/* CUSE specific operations */
CUSE_INIT = 4096,
+
+ /* Reserved opcodes: helpful to detect structure endian-ness */
+ CUSE_INIT_BSWAP_RESERVED = 1048576, /* CUSE_INIT << 8 */
+ FUSE_INIT_BSWAP_RESERVED = 436207616, /* FUSE_INIT << 24 */
};
enum fuse_notify_code {
@@ -652,7 +662,7 @@ struct fuse_init_out {
uint32_t max_write;
uint32_t time_gran;
uint16_t max_pages;
- uint16_t padding;
+ uint16_t map_alignment;
uint32_t unused[8];
};
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 96ee9d94b73e..ea57526a5b89 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -28,6 +28,7 @@ struct io_uring_sqe {
__u16 poll_events;
__u32 sync_range_flags;
__u32 msg_flags;
+ __u32 timeout_flags;
};
__u64 user_data; /* data to be passed back at completion time */
union {
@@ -61,6 +62,7 @@ struct io_uring_sqe {
#define IORING_OP_SYNC_FILE_RANGE 8
#define IORING_OP_SENDMSG 9
#define IORING_OP_RECVMSG 10
+#define IORING_OP_TIMEOUT 11
/*
* sqe->fsync_flags
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 233efbb1c81c..52641d8ca9e8 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -999,6 +999,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_PTRAUTH_GENERIC 172
#define KVM_CAP_PMU_EVENT_FILTER 173
#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
+#define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1145,6 +1146,7 @@ struct kvm_dirty_tlb {
#define KVM_REG_S390 0x5000000000000000ULL
#define KVM_REG_ARM64 0x6000000000000000ULL
#define KVM_REG_MIPS 0x7000000000000000ULL
+#define KVM_REG_RISCV 0x8000000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 2a6b253cfb05..16c1fa2d89a4 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x101c */
+/* RGB - next is 0x101d */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -55,6 +55,7 @@
#define MEDIA_BUS_FMT_RGB888_1X24 0x100a
#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c
+#define MEDIA_BUS_FMT_RGB888_3X8 0x101c
#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011
#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
diff --git a/include/uapi/linux/nfsd/cld.h b/include/uapi/linux/nfsd/cld.h
index b1e9de4f07d5..a519313af953 100644
--- a/include/uapi/linux/nfsd/cld.h
+++ b/include/uapi/linux/nfsd/cld.h
@@ -26,17 +26,22 @@
#include <linux/types.h>
/* latest upcall version available */
-#define CLD_UPCALL_VERSION 1
+#define CLD_UPCALL_VERSION 2
/* defined by RFC3530 */
#define NFS4_OPAQUE_LIMIT 1024
+#ifndef SHA256_DIGEST_SIZE
+#define SHA256_DIGEST_SIZE 32
+#endif
+
enum cld_command {
Cld_Create, /* create a record for this cm_id */
Cld_Remove, /* remove record of this cm_id */
Cld_Check, /* is this cm_id allowed? */
Cld_GraceDone, /* grace period is complete */
- Cld_GraceStart,
+ Cld_GraceStart, /* grace start (upload client records) */
+ Cld_GetVersion, /* query max supported upcall version */
};
/* representation of long-form NFSv4 client ID */
@@ -45,6 +50,17 @@ struct cld_name {
unsigned char cn_id[NFS4_OPAQUE_LIMIT]; /* client-provided */
} __attribute__((packed));
+/* sha256 hash of the kerberos principal */
+struct cld_princhash {
+ __u8 cp_len; /* length of cp_data */
+ unsigned char cp_data[SHA256_DIGEST_SIZE]; /* hash of principal */
+} __attribute__((packed));
+
+struct cld_clntinfo {
+ struct cld_name cc_name;
+ struct cld_princhash cc_princhash;
+} __attribute__((packed));
+
/* message struct for communication with userspace */
struct cld_msg {
__u8 cm_vers; /* upcall version */
@@ -54,7 +70,28 @@ struct cld_msg {
union {
__s64 cm_gracetime; /* grace period start time */
struct cld_name cm_name;
+ __u8 cm_version; /* for getting max version */
+ } __attribute__((packed)) cm_u;
+} __attribute__((packed));
+
+/* version 2 message can include hash of kerberos principal */
+struct cld_msg_v2 {
+ __u8 cm_vers; /* upcall version */
+ __u8 cm_cmd; /* upcall command */
+ __s16 cm_status; /* return code */
+ __u32 cm_xid; /* transaction id */
+ union {
+ struct cld_name cm_name;
+ __u8 cm_version; /* for getting max version */
+ struct cld_clntinfo cm_clntinfo; /* name & princ hash */
} __attribute__((packed)) cm_u;
} __attribute__((packed));
+struct cld_msg_hdr {
+ __u8 cm_vers; /* upcall version */
+ __u8 cm_cmd; /* upcall command */
+ __s16 cm_status; /* return code */
+ __u32 cm_xid; /* transaction id */
+} __attribute__((packed));
+
#endif /* !_NFSD_CLD_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index f28e562d7ca8..29d6e93fd15e 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -591,6 +591,7 @@
#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */
#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */
#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */
+#define PCI_EXP_SLTCTL_ATTN_IND_SHIFT 6 /* Attention Indicator shift */
#define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */
#define PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */
#define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */
@@ -713,7 +714,9 @@
#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */
#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM
+#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
+#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -1053,4 +1056,14 @@
#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
+/* Data Link Feature */
+#define PCI_DLF_CAP 0x04 /* Capabilities Register */
+#define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data Link Feature Exchange Enable */
+
+/* Physical Layer 16.0 GT/s */
+#define PCI_PL_16GT_LE_CTRL 0x20 /* Lane Equalization Control Register */
+#define PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK 0x0000000F
+#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
+#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
+
#endif /* LINUX_PCI_REGS_H */
diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h
index a0cac1d8670d..50e991952c97 100644
--- a/include/uapi/linux/serio.h
+++ b/include/uapi/linux/serio.h
@@ -82,5 +82,6 @@
#define SERIO_EGALAX 0x3f
#define SERIO_PULSE8_CEC 0x40
#define SERIO_RAINSHADOW_CEC 0x41
+#define SERIO_FSIA6B 0x42
#endif /* _UAPI_SERIO_H */
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 8f10748dac79..9e843a147ead 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -295,15 +295,38 @@ struct vfio_region_info_cap_type {
__u32 subtype; /* type specific */
};
+/*
+ * List of region types, global per bus driver.
+ * If you introduce a new type, please add it here.
+ */
+
+/* PCI region type containing a PCI vendor part */
#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE (1 << 31)
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff)
+#define VFIO_REGION_TYPE_GFX (1)
+#define VFIO_REGION_TYPE_CCW (2)
+
+/* sub-types for VFIO_REGION_TYPE_PCI_* */
-/* 8086 Vendor sub-types */
+/* 8086 vendor PCI sub-types */
#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION (1)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
-#define VFIO_REGION_TYPE_GFX (1)
+/* 10de vendor PCI sub-types */
+/*
+ * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
+ */
+#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
+
+/* 1014 vendor PCI sub-types */
+/*
+ * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
+ * to do TLB invalidation on a GPU.
+ */
+#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
+
+/* sub-types for VFIO_REGION_TYPE_GFX */
#define VFIO_REGION_SUBTYPE_GFX_EDID (1)
/**
@@ -353,26 +376,10 @@ struct vfio_region_gfx_edid {
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2
};
-#define VFIO_REGION_TYPE_CCW (2)
-/* ccw sub-types */
+/* sub-types for VFIO_REGION_TYPE_CCW */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1)
/*
- * 10de vendor sub-type
- *
- * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
- */
-#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
-
-/*
- * 1014 vendor sub-type
- *
- * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
- * to do TLB invalidation on a GPU.
- */
-#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
-
-/*
* The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
* which allows direct access to non-MSIX registers which happened to be within
* the same system page.
@@ -714,7 +721,31 @@ struct vfio_iommu_type1_info {
__u32 argsz;
__u32 flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */
- __u64 iova_pgsizes; /* Bitmap of supported page sizes */
+#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */
+ __u64 iova_pgsizes; /* Bitmap of supported page sizes */
+ __u32 cap_offset; /* Offset within info struct of first cap */
+};
+
+/*
+ * The IOVA capability reports the valid IOVA range(s),
+ * excluding any non-relaxable reserved regions exposed by
+ * devices attached to the container. Any DMA map attempt
+ * outside the valid iova ranges will return an error.
+ *
+ * The structures below define version 1 of this capability.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE 1
+
+struct vfio_iova_range {
+ __u64 start;
+ __u64 end;
+};
+
+struct vfio_iommu_type1_info_cap_iova_range {
+ struct vfio_info_cap_header header;
+ __u32 nr_iovas;
+ __u32 reserved;
+ struct vfio_iova_range iova_ranges[];
};
#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
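Userspace reaches the new IOVA ranges by walking the capability chain that cap_offset points into, the same vfio_info_cap_header walk used by other VFIO capabilities. A hedged sketch (single fixed-size buffer instead of the usual two-call argsz dance; error handling elided):

#include <stdio.h>
#include <sys/ioctl.h>

static void example_print_iova_ranges(int container_fd)
{
	char buf[4096] = { 0 };
	struct vfio_iommu_type1_info *info = (void *)buf;
	struct vfio_info_cap_header *hdr;
	__u32 off, i;

	info->argsz = sizeof(buf);
	if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, info) ||
	    !(info->flags & VFIO_IOMMU_INFO_CAPS) || !info->cap_offset)
		return;

	for (off = info->cap_offset; off; off = hdr->next) {
		hdr = (struct vfio_info_cap_header *)(buf + off);
		if (hdr->id != VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE)
			continue;

		struct vfio_iommu_type1_info_cap_iova_range *cap = (void *)hdr;

		for (i = 0; i < cap->nr_iovas; i++)
			printf("iova range [0x%llx, 0x%llx]\n",
			       (unsigned long long)cap->iova_ranges[i].start,
			       (unsigned long long)cap->iova_ranges[i].end);
	}
}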
diff --git a/include/uapi/linux/virtio_fs.h b/include/uapi/linux/virtio_fs.h
new file mode 100644
index 000000000000..b02eb2ac3d99
--- /dev/null
+++ b/include/uapi/linux/virtio_fs.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+
+#ifndef _UAPI_LINUX_VIRTIO_FS_H
+#define _UAPI_LINUX_VIRTIO_FS_H
+
+#include <linux/types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_types.h>
+
+struct virtio_fs_config {
+ /* Filesystem name (UTF-8, not NUL-terminated, padded with NULs) */
+ __u8 tag[36];
+
+ /* Number of request queues */
+ __u32 num_request_queues;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_VIRTIO_FS_H */
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 348fd0176f75..585e07b27333 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -44,6 +44,7 @@
#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
+#define VIRTIO_ID_FS 26 /* virtio filesystem */
#define VIRTIO_ID_PMEM 27 /* virtio pmem */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 7e9900b0e746..88b6ca70c2fe 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -43,6 +43,7 @@ enum mlx5_ib_uapi_flow_table_type {
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0,
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3,
};
enum mlx5_ib_uapi_flow_action_packet_reformat_type {
diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h
index 52f32a60d056..3ae65e93235c 100644
--- a/include/uapi/scsi/scsi_bsg_fc.h
+++ b/include/uapi/scsi/scsi_bsg_fc.h
@@ -8,6 +8,8 @@
#ifndef SCSI_BSG_FC_H
#define SCSI_BSG_FC_H
+#include <linux/types.h>
+
/*
* This file intended to be included by both kernel and user space
*/
@@ -66,10 +68,10 @@
* with the transport upon completion of the login.
*/
struct fc_bsg_host_add_rport {
- uint8_t reserved;
+ __u8 reserved;
/* FC Address Identier of the remote port to login to */
- uint8_t port_id[3];
+ __u8 port_id[3];
};
/* Response:
@@ -87,10 +89,10 @@ struct fc_bsg_host_add_rport {
* remain logged in with the remote port.
*/
struct fc_bsg_host_del_rport {
- uint8_t reserved;
+ __u8 reserved;
/* FC Address Identier of the remote port to logout of */
- uint8_t port_id[3];
+ __u8 port_id[3];
};
/* Response:
@@ -111,10 +113,10 @@ struct fc_bsg_host_els {
* ELS Command Code being sent (must be the same as byte 0
* of the payload)
*/
- uint8_t command_code;
+ __u8 command_code;
/* FC Address Identier of the remote port to send the ELS to */
- uint8_t port_id[3];
+ __u8 port_id[3];
};
/* Response:
@@ -151,14 +153,14 @@ struct fc_bsg_ctels_reply {
* Note: x_RJT/BSY status will indicae that the rjt_data field
* is valid and contains the reason/explanation values.
*/
- uint32_t status; /* See FC_CTELS_STATUS_xxx */
+ __u32 status; /* See FC_CTELS_STATUS_xxx */
/* valid if status is not FC_CTELS_STATUS_OK */
struct {
- uint8_t action; /* fragment_id for CT REJECT */
- uint8_t reason_code;
- uint8_t reason_explanation;
- uint8_t vendor_unique;
+ __u8 action; /* fragment_id for CT REJECT */
+ __u8 reason_code;
+ __u8 reason_explanation;
+ __u8 vendor_unique;
} rjt_data;
};
@@ -174,17 +176,17 @@ struct fc_bsg_ctels_reply {
* and whether to tear it down after the request.
*/
struct fc_bsg_host_ct {
- uint8_t reserved;
+ __u8 reserved;
/* FC Address Identier of the remote port to send the ELS to */
- uint8_t port_id[3];
+ __u8 port_id[3];
/*
* We need words 0-2 of the generic preamble for the LLD's
*/
- uint32_t preamble_word0; /* revision & IN_ID */
- uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
- uint32_t preamble_word2; /* Cmd Code, Max Size */
+ __u32 preamble_word0; /* revision & IN_ID */
+ __u32 preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
+ __u32 preamble_word2; /* Cmd Code, Max Size */
};
/* Response:
@@ -204,17 +206,17 @@ struct fc_bsg_host_vendor {
* Identifies the vendor that the message is formatted for. This
* should be the recipient of the message.
*/
- uint64_t vendor_id;
+ __u64 vendor_id;
/* start of vendor command area */
- uint32_t vendor_cmd[0];
+ __u32 vendor_cmd[0];
};
/* Response:
*/
struct fc_bsg_host_vendor_reply {
/* start of vendor response area */
- uint32_t vendor_rsp[0];
+ __u32 vendor_rsp[0];
};
@@ -233,7 +235,7 @@ struct fc_bsg_rport_els {
* ELS Command Code being sent (must be the same as
* byte 0 of the payload)
*/
- uint8_t els_code;
+ __u8 els_code;
};
/* Response:
@@ -251,9 +253,9 @@ struct fc_bsg_rport_ct {
/*
* We need words 0-2 of the generic preamble for the LLD's
*/
- uint32_t preamble_word0; /* revision & IN_ID */
- uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
- uint32_t preamble_word2; /* Cmd Code, Max Size */
+ __u32 preamble_word0; /* revision & IN_ID */
+ __u32 preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
+ __u32 preamble_word2; /* Cmd Code, Max Size */
};
/* Response:
*
@@ -265,7 +267,7 @@ struct fc_bsg_rport_ct {
/* request (CDB) structure of the sg_io_v4 */
struct fc_bsg_request {
- uint32_t msgcode;
+ __u32 msgcode;
union {
struct fc_bsg_host_add_rport h_addrport;
struct fc_bsg_host_del_rport h_delrport;
@@ -289,10 +291,10 @@ struct fc_bsg_reply {
* msg and status fields. The per-msgcode reply structure
* will contain valid data.
*/
- uint32_t result;
+ __u32 result;
/* If there was reply_payload, how much was recevied ? */
- uint32_t reply_payload_rcv_len;
+ __u32 reply_payload_rcv_len;
union {
struct fc_bsg_host_vendor_reply vendor_reply;
diff --git a/include/uapi/scsi/scsi_netlink.h b/include/uapi/scsi/scsi_netlink.h
index 5dd382054e45..1b1737c3c9d8 100644
--- a/include/uapi/scsi/scsi_netlink.h
+++ b/include/uapi/scsi/scsi_netlink.h
@@ -26,12 +26,12 @@
/* SCSI_TRANSPORT_MSG event message header */
struct scsi_nl_hdr {
- uint8_t version;
- uint8_t transport;
- uint16_t magic;
- uint16_t msgtype;
- uint16_t msglen;
-} __attribute__((aligned(sizeof(uint64_t))));
+ __u8 version;
+ __u8 transport;
+ __u16 magic;
+ __u16 msgtype;
+ __u16 msglen;
+} __attribute__((aligned(sizeof(__u64))));
/* scsi_nl_hdr->version value */
#define SCSI_NL_VERSION 1
@@ -75,10 +75,10 @@ struct scsi_nl_hdr {
*/
struct scsi_nl_host_vendor_msg {
struct scsi_nl_hdr snlh; /* must be 1st element ! */
- uint64_t vendor_id;
- uint16_t host_no;
- uint16_t vmsg_datalen;
-} __attribute__((aligned(sizeof(uint64_t))));
+ __u64 vendor_id;
+ __u16 host_no;
+ __u16 vmsg_datalen;
+} __attribute__((aligned(sizeof(__u64))));
/*
diff --git a/include/uapi/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h
index a39023579051..7535253f1a96 100644
--- a/include/uapi/scsi/scsi_netlink_fc.h
+++ b/include/uapi/scsi/scsi_netlink_fc.h
@@ -7,6 +7,7 @@
#ifndef SCSI_NETLINK_FC_H
#define SCSI_NETLINK_FC_H
+#include <linux/types.h>
#include <scsi/scsi_netlink.h>
/*
@@ -43,14 +44,14 @@
*/
struct fc_nl_event {
struct scsi_nl_hdr snlh; /* must be 1st element ! */
- uint64_t seconds;
- uint64_t vendor_id;
- uint16_t host_no;
- uint16_t event_datalen;
- uint32_t event_num;
- uint32_t event_code;
- uint32_t event_data;
-} __attribute__((aligned(sizeof(uint64_t))));
+ __u64 seconds;
+ __u64 vendor_id;
+ __u16 host_no;
+ __u16 event_datalen;
+ __u32 event_num;
+ __u32 event_code;
+ __u32 event_data;
+} __attribute__((aligned(sizeof(__u64))));
#endif /* SCSI_NETLINK_FC_H */
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
index 2982571f7cc1..43ef24dd030e 100644
--- a/include/xen/arm/hypervisor.h
+++ b/include/xen/arm/hypervisor.h
@@ -19,8 +19,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return PARAVIRT_LAZY_NONE;
}
-extern const struct dma_map_ops *xen_dma_ops;
-
#ifdef CONFIG_XEN
void __init xen_early_init(void);
#else
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index 2ca9164a79bf..b9cc11e887ed 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -2,15 +2,19 @@
#ifndef _XEN_ARM_PAGE_COHERENT_H
#define _XEN_ARM_PAGE_COHERENT_H
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
- dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs);
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
-void __xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+ return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+ dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
#endif /* _XEN_ARM_PAGE_COHERENT_H */
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 5e4b83f83dbc..d71380f6ed0b 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -4,6 +4,11 @@
#include <linux/swiotlb.h>
+void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
+ phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+ phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+
extern int xen_swiotlb_init(int verbose, bool early);
extern const struct dma_map_ops xen_swiotlb_dma_ops;