Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/5level-fixup.h | 1
-rw-r--r--  include/asm-generic/atomic-instrumented.h | 476
-rw-r--r--  include/asm-generic/pgtable-nop4d.h | 9
-rw-r--r--  include/asm-generic/pgtable.h | 10
-rw-r--r--  include/drm/amd_asic_type.h | 1
-rw-r--r--  include/drm/bridge/analogix_dp.h | 20
-rw-r--r--  include/drm/bridge/dw_hdmi.h | 24
-rw-r--r--  include/drm/bridge/dw_mipi_dsi.h | 17
-rw-r--r--  include/drm/drm_atomic.h | 24
-rw-r--r--  include/drm/drm_atomic_helper.h | 1
-rw-r--r--  include/drm/drm_bridge.h | 35
-rw-r--r--  include/drm/drm_cache.h | 2
-rw-r--r--  include/drm/drm_color_mgmt.h | 31
-rw-r--r--  include/drm/drm_connector.h | 22
-rw-r--r--  include/drm/drm_dp_helper.h | 66
-rw-r--r--  include/drm/drm_fourcc.h | 2
-rw-r--r--  include/drm/drm_gem.h | 15
-rw-r--r--  include/drm/drm_hdcp.h | 41
-rw-r--r--  include/drm/drm_mode_config.h | 12
-rw-r--r--  include/drm/drm_mode_object.h | 24
-rw-r--r--  include/drm/drm_modes.h | 24
-rw-r--r--  include/drm/drm_plane.h | 32
-rw-r--r--  include/drm/drm_plane_helper.h | 1
-rw-r--r--  include/drm/drm_prime.h | 22
-rw-r--r--  include/drm/drm_print.h | 119
-rw-r--r--  include/drm/drm_property.h | 26
-rw-r--r--  include/drm/drm_simple_kms_helper.h | 53
-rw-r--r--  include/drm/drm_vblank.h | 20
-rw-r--r--  include/drm/i915_component.h | 3
-rw-r--r--  include/drm/i915_pciids.h | 39
-rw-r--r--  include/drm/tinydrm/ili9341.h | 54
-rw-r--r--  include/drm/tinydrm/mipi-dbi.h | 5
-rw-r--r--  include/drm/tinydrm/tinydrm-helpers.h | 4
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 32
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 250
-rw-r--r--  include/drm/ttm/ttm_memory.h | 5
-rw-r--r--  include/drm/ttm/ttm_tt.h | 272
-rw-r--r--  include/kvm/arm_vgic.h | 1
-rw-r--r--  include/linux/acpi.h | 7
-rw-r--r--  include/linux/backlight.h | 58
-rw-r--r--  include/linux/cgroup-defs.h | 4
-rw-r--r--  include/linux/compat.h | 26
-rw-r--r--  include/linux/dmi.h | 2
-rw-r--r--  include/linux/fs.h | 1
-rw-r--r--  include/linux/fsl_ifc.h | 6
-rw-r--r--  include/linux/hw_breakpoint.h | 7
-rw-r--r--  include/linux/hypervisor.h | 17
-rw-r--r--  include/linux/if_tun.h | 4
-rw-r--r--  include/linux/if_vlan.h | 79
-rw-r--r--  include/linux/interrupt.h | 2
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 1
-rw-r--r--  include/linux/irqchip/arm-gic.h | 1
-rw-r--r--  include/linux/jump_label.h | 4
-rw-r--r--  include/linux/kasan.h | 2
-rw-r--r--  include/linux/memblock.h | 1
-rw-r--r--  include/linux/mlx5/driver.h | 4
-rw-r--r--  include/linux/mmzone.h | 5
-rw-r--r--  include/linux/mutex.h | 1
-rw-r--r--  include/linux/net.h | 1
-rw-r--r--  include/linux/net_dim.h | 2
-rw-r--r--  include/linux/netfilter/x_tables.h | 2
-rw-r--r--  include/linux/of.h | 6
-rw-r--r--  include/linux/of_pci.h | 17
-rw-r--r--  include/linux/pci.h | 2
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/percpu-refcount.h | 18
-rw-r--r--  include/linux/perf_event.h | 44
-rw-r--r--  include/linux/phy.h | 5
-rw-r--r--  include/linux/rcupdate.h | 10
-rw-r--r--  include/linux/rhashtable.h | 4
-rw-r--r--  include/linux/sched.h | 30
-rw-r--r--  include/linux/sched/cpufreq.h | 5
-rw-r--r--  include/linux/sched/nohz.h | 2
-rw-r--r--  include/linux/skbuff.h | 22
-rw-r--r--  include/linux/trace_events.h | 8
-rw-r--r--  include/linux/tty.h | 1
-rw-r--r--  include/linux/types.h | 2
-rw-r--r--  include/linux/u64_stats_sync.h | 22
-rw-r--r--  include/linux/usb/quirks.h | 3
-rw-r--r--  include/linux/vga_switcheroo.h | 6
-rw-r--r--  include/linux/workqueue.h | 1
-rw-r--r--  include/net/ip.h | 11
-rw-r--r--  include/net/ip6_route.h | 3
-rw-r--r--  include/net/ip_fib.h | 1
-rw-r--r--  include/net/llc_conn.h | 2
-rw-r--r--  include/net/mac80211.h | 4
-rw-r--r--  include/net/netfilter/nf_tables.h | 4
-rw-r--r--  include/net/route.h | 6
-rw-r--r--  include/net/sch_generic.h | 20
-rw-r--r--  include/net/sock.h | 1
-rw-r--r--  include/rdma/ib_addr.h | 2
-rw-r--r--  include/rdma/ib_verbs.h | 4
-rw-r--r--  include/scsi/scsi_host.h | 3
-rw-r--r--  include/sound/hdaudio.h | 3
-rw-r--r--  include/trace/events/mmc.h | 4
-rw-r--r--  include/trace/events/rcu.h | 4
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 7
-rw-r--r--  include/uapi/drm/drm_mode.h | 43
-rw-r--r--  include/uapi/drm/etnaviv_drm.h | 6
-rw-r--r--  include/uapi/drm/i915_drm.h | 112
-rw-r--r--  include/uapi/drm/msm_drm.h | 2
-rw-r--r--  include/uapi/drm/vc4_drm.h | 76
-rw-r--r--  include/uapi/linux/if_ether.h | 1
-rw-r--r--  include/uapi/linux/kfd_ioctl.h | 130
-rw-r--r--  include/uapi/linux/lirc.h | 1
-rw-r--r--  include/uapi/linux/perf_event.h | 27
-rw-r--r--  include/uapi/linux/usb/audio.h | 4
-rw-r--r--  include/uapi/misc/ocxl.h | 17
108 files changed, 2124 insertions, 615 deletions
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index dfbd9d990637..9c2e0708eb82 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -8,6 +8,7 @@
#define P4D_SHIFT PGDIR_SHIFT
#define P4D_SIZE PGDIR_SIZE
#define P4D_MASK PGDIR_MASK
+#define MAX_PTRS_PER_P4D 1
#define PTRS_PER_P4D 1
#define p4d_t pgd_t
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
new file mode 100644
index 000000000000..ec07f23678ea
--- /dev/null
+++ b/include/asm-generic/atomic-instrumented.h
@@ -0,0 +1,476 @@
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality, an arch's atomic.h file needs to define all
+ * atomic operations with an arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file provides atomic_read() that forwards to
+ * arch_atomic_read() for the actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
+ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */
+
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/kasan-checks.h>
+
+static __always_inline int atomic_read(const atomic_t *v)
+{
+ kasan_check_read(v, sizeof(*v));
+ return arch_atomic_read(v);
+}
+
+static __always_inline s64 atomic64_read(const atomic64_t *v)
+{
+ kasan_check_read(v, sizeof(*v));
+ return arch_atomic64_read(v);
+}
+
+static __always_inline void atomic_set(atomic_t *v, int i)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_set(v, i);
+}
+
+static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_set(v, i);
+}
+
+static __always_inline int atomic_xchg(atomic_t *v, int i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_xchg(v, i);
+}
+
+static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg(v, old, new);
+}
+
+#ifdef arch_atomic_try_cmpxchg
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_read(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg(v, old, new);
+}
+#endif
+
+#ifdef arch_atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_read(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg(v, old, new);
+}
+#endif
+
+static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return __arch_atomic_add_unless(v, a, u);
+}
+
+
+static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline void atomic_inc(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_inc(v);
+}
+
+static __always_inline void atomic64_inc(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_inc(v);
+}
+
+static __always_inline void atomic_dec(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_dec(v);
+}
+
+static __always_inline void atomic64_dec(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_dec(v);
+}
+
+static __always_inline void atomic_add(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_add(i, v);
+}
+
+static __always_inline void atomic64_add(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_add(i, v);
+}
+
+static __always_inline void atomic_sub(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_sub(i, v);
+}
+
+static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_sub(i, v);
+}
+
+static __always_inline void atomic_and(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_and(i, v);
+}
+
+static __always_inline void atomic64_and(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_and(i, v);
+}
+
+static __always_inline void atomic_or(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_or(i, v);
+}
+
+static __always_inline void atomic64_or(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_or(i, v);
+}
+
+static __always_inline void atomic_xor(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_xor(i, v);
+}
+
+static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_xor(i, v);
+}
+
+static __always_inline int atomic_inc_return(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_return(v);
+}
+
+static __always_inline s64 atomic64_inc_return(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_return(v);
+}
+
+static __always_inline int atomic_dec_return(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_return(v);
+}
+
+static __always_inline s64 atomic64_dec_return(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_return(v);
+}
+
+static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_if_positive(v);
+}
+
+static __always_inline bool atomic_dec_and_test(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool atomic_inc_and_test(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline int atomic_add_return(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_add_return(i, v);
+}
+
+static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline int atomic_sub_return(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline int atomic_fetch_and(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline int atomic_fetch_or(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline unsigned long
+cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ kasan_check_write(ptr, size);
+ switch (size) {
+ case 1:
+ return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
+ case 2:
+ return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
+ case 4:
+ return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
+ case 8:
+ BUILD_BUG_ON(sizeof(unsigned long) != 8);
+ return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
+ }
+ BUILD_BUG();
+ return 0;
+}
+
+#define cmpxchg(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old), \
+ (unsigned long)(new), sizeof(*(ptr)))); \
+})
+
+static __always_inline unsigned long
+sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ kasan_check_write(ptr, size);
+ switch (size) {
+ case 1:
+ return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
+ case 2:
+ return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
+ case 4:
+ return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
+ case 8:
+ BUILD_BUG_ON(sizeof(unsigned long) != 8);
+ return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
+ }
+ BUILD_BUG();
+ return 0;
+}
+
+#define sync_cmpxchg(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))sync_cmpxchg_size((ptr), \
+ (unsigned long)(old), (unsigned long)(new), \
+ sizeof(*(ptr)))); \
+})
+
+static __always_inline unsigned long
+cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ kasan_check_write(ptr, size);
+ switch (size) {
+ case 1:
+ return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
+ case 2:
+ return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
+ case 4:
+ return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
+ case 8:
+ BUILD_BUG_ON(sizeof(unsigned long) != 8);
+ return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
+ }
+ BUILD_BUG();
+ return 0;
+}
+
+#define cmpxchg_local(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg_local_size((ptr), \
+ (unsigned long)(old), (unsigned long)(new), \
+ sizeof(*(ptr)))); \
+})
+
+static __always_inline u64
+cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
+{
+ kasan_check_write(ptr, sizeof(*ptr));
+ return arch_cmpxchg64(ptr, old, new);
+}
+
+#define cmpxchg64(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old), \
+ (u64)(new))); \
+})
+
+static __always_inline u64
+cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
+{
+ kasan_check_write(ptr, sizeof(*ptr));
+ return arch_cmpxchg64_local(ptr, old, new);
+}
+
+#define cmpxchg64_local(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old), \
+ (u64)(new))); \
+})
+
+/*
+ * Originally we had the following code here:
+ * __typeof__(p1) ____p1 = (p1);
+ * kasan_check_write(____p1, 2 * sizeof(*____p1));
+ * arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
+ * But it leads to compilation failures (see gcc issue 72873).
+ * So for now it's left non-instrumented.
+ * There are few callers of cmpxchg_double(), so it's not critical.
+ */
+#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+({ \
+ arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \
+})
+
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+({ \
+ arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2)); \
+})
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
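As a minimal sketch of the usage model described in the header comment above, a hypothetical architecture would define the arch_-prefixed operations in its own asm/atomic.h and include this file last; the bodies below are illustrative, not any particular architecture's implementation:

/* arch/foo/include/asm/atomic.h -- illustrative sketch only */
#include <linux/compiler.h>
#include <linux/types.h>

#define ATOMIC_INIT(i)	{ (i) }

static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/* plain once-read; a real arch may need stronger ordering */
	return READ_ONCE(v->counter);
}

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

/* ... the remaining arch_atomic*()/arch_cmpxchg*() definitions ... */

/* included last, so the instrumented wrappers pick up the arch_ ops */
#include <asm-generic/atomic-instrumented.h>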
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 8f22f55de17a..1a29b2a0282b 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -8,10 +8,11 @@
typedef struct { pgd_t pgd; } p4d_t;
-#define P4D_SHIFT PGDIR_SHIFT
-#define PTRS_PER_P4D 1
-#define P4D_SIZE (1UL << P4D_SHIFT)
-#define P4D_MASK (~(P4D_SIZE-1))
+#define P4D_SHIFT PGDIR_SHIFT
+#define MAX_PTRS_PER_P4D 1
+#define PTRS_PER_P4D 1
+#define P4D_SIZE (1UL << P4D_SHIFT)
+#define P4D_MASK (~(P4D_SIZE-1))
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2cfa3075d148..bfbb44a5ad38 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -983,6 +983,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud);
+int pmd_free_pte_page(pmd_t *pmd);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@@ -1008,6 +1010,14 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
+static inline int pud_free_pmd_page(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_free_pte_page(pmd_t *pmd)
+{
+ return 0;
+}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 599028f66585..6c731c52c071 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -45,6 +45,7 @@ enum amd_asic_type {
CHIP_POLARIS11,
CHIP_POLARIS12,
CHIP_VEGA10,
+ CHIP_VEGA12,
CHIP_RAVEN,
CHIP_LAST,
};
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index c99d6eaef1ac..e9a1116d2f8e 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -13,6 +13,8 @@
#include <drm/drm_crtc.h>
+struct analogix_dp_device;
+
enum analogix_dp_devtype {
EXYNOS_DP,
RK3288_DP,
@@ -29,6 +31,7 @@ struct analogix_dp_plat_data {
struct drm_panel *panel;
struct drm_encoder *encoder;
struct drm_connector *connector;
+ bool skip_connector;
int (*power_on)(struct analogix_dp_plat_data *);
int (*power_off)(struct analogix_dp_plat_data *);
@@ -38,16 +41,17 @@ struct analogix_dp_plat_data {
struct drm_connector *);
};
-int analogix_dp_psr_supported(struct device *dev);
-int analogix_dp_enable_psr(struct device *dev);
-int analogix_dp_disable_psr(struct device *dev);
+int analogix_dp_psr_enabled(struct analogix_dp_device *dp);
+int analogix_dp_enable_psr(struct analogix_dp_device *dp);
+int analogix_dp_disable_psr(struct analogix_dp_device *dp);
-int analogix_dp_resume(struct device *dev);
-int analogix_dp_suspend(struct device *dev);
+int analogix_dp_resume(struct analogix_dp_device *dp);
+int analogix_dp_suspend(struct analogix_dp_device *dp);
-int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
- struct analogix_dp_plat_data *plat_data);
-void analogix_dp_unbind(struct device *dev, struct device *master, void *data);
+struct analogix_dp_device *
+analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
+ struct analogix_dp_plat_data *plat_data);
+void analogix_dp_unbind(struct analogix_dp_device *dp);
int analogix_dp_start_crc(struct drm_connector *connector);
int analogix_dp_stop_crc(struct drm_connector *connector);
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 182f83283e24..dd2a8cf7d20b 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -143,12 +143,13 @@ struct dw_hdmi_plat_data {
unsigned long mpixelclock);
};
-int dw_hdmi_probe(struct platform_device *pdev,
- const struct dw_hdmi_plat_data *plat_data);
-void dw_hdmi_remove(struct platform_device *pdev);
-void dw_hdmi_unbind(struct device *dev);
-int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
- const struct dw_hdmi_plat_data *plat_data);
+struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
+ const struct dw_hdmi_plat_data *plat_data);
+void dw_hdmi_remove(struct dw_hdmi *hdmi);
+void dw_hdmi_unbind(struct dw_hdmi *hdmi);
+struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
+ struct drm_encoder *encoder,
+ const struct dw_hdmi_plat_data *plat_data);
void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense);
@@ -157,7 +158,18 @@ void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
/* PHY configuration */
+void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address);
void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
unsigned char addr);
+void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable);
+void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable);
+void dw_hdmi_phy_reset(struct dw_hdmi *hdmi);
+
+enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
+ void *data);
+void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
+ bool force, bool disabled, bool rxsense);
+void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
+
#endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index 9b30fec302c8..d9c6d549f971 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -10,6 +10,8 @@
#ifndef __DW_MIPI_DSI__
#define __DW_MIPI_DSI__
+struct dw_mipi_dsi;
+
struct dw_mipi_dsi_phy_ops {
int (*init)(void *priv_data);
int (*get_lane_mbps)(void *priv_data, struct drm_display_mode *mode,
@@ -29,11 +31,14 @@ struct dw_mipi_dsi_plat_data {
void *priv_data;
};
-int dw_mipi_dsi_probe(struct platform_device *pdev,
- const struct dw_mipi_dsi_plat_data *plat_data);
-void dw_mipi_dsi_remove(struct platform_device *pdev);
-int dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
- const struct dw_mipi_dsi_plat_data *plat_data);
-void dw_mipi_dsi_unbind(struct device *dev);
+struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi_plat_data
+ *plat_data);
+void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
+struct dw_mipi_dsi *dw_mipi_dsi_bind(struct platform_device *pdev,
+ struct drm_encoder *encoder,
+ const struct dw_mipi_dsi_plat_data
+ *plat_data);
+void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
#endif /* __DW_MIPI_DSI__ */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index cf13842a6dbd..a57a8aa90ffb 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -154,7 +154,7 @@ struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state, *old_state, *new_state;
s32 __user *out_fence_ptr;
- unsigned last_vblank_count;
+ u64 last_vblank_count;
};
struct __drm_connnectors_state {
@@ -754,6 +754,28 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(new_plane_state) = (__state)->planes[__i].new_state, 1))
/**
+ * for_each_oldnew_plane_in_state_reverse - iterate over all planes in an atomic
+ * update in reverse order
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @old_plane_state: &struct drm_plane_state iteration cursor for the old state
+ * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all planes in an atomic update in reverse order,
+ * tracking both old and new state. This is useful in places where the
+ * state delta needs to be considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_plane_in_state_reverse(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \
+ (__i) >= 0; \
+ (__i)--) \
+ for_each_if ((__state)->planes[__i].ptr && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (old_plane_state) = (__state)->planes[__i].old_state,\
+ (new_plane_state) = (__state)->planes[__i].new_state, 1))
+
+/**
* for_each_old_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 4842ee9485ce..26aaba58d6ce 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -40,7 +40,6 @@ int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
const struct drm_crtc_state *crtc_state,
- const struct drm_rect *clip,
int min_scale,
int max_scale,
bool can_position,
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 682d01ba920c..3270fec46979 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -29,6 +29,7 @@
#include <drm/drm_modes.h>
struct drm_bridge;
+struct drm_bridge_timings;
struct drm_panel;
/**
@@ -90,7 +91,7 @@ struct drm_bridge_funcs {
*
* drm_mode_status Enum
*/
- enum drm_mode_status (*mode_valid)(struct drm_bridge *crtc,
+ enum drm_mode_status (*mode_valid)(struct drm_bridge *bridge,
const struct drm_display_mode *mode);
/**
@@ -223,12 +224,43 @@ struct drm_bridge_funcs {
};
/**
+ * struct drm_bridge_timings - timing information for the bridge
+ */
+struct drm_bridge_timings {
+ /**
+ * @sampling_edge:
+ *
+ * Tells whether the bridge samples the digital input signal
+ * from the display engine on the positive or negative edge of the
+ * clock. This should reuse the DRM_BUS_FLAG_PIXDATA_[POS|NEG]EDGE
+ * bitwise flags from the DRM connector (bits 2 and 3 valid).
+ */
+ u32 sampling_edge;
+ /**
+ * @setup_time_ps:
+ *
+ * Defines the time in picoseconds the input data lines must be
+ * stable before the clock edge.
+ */
+ u32 setup_time_ps;
+ /**
+ * @hold_time_ps:
+ *
+ * Defines the time in picoseconds taken for the bridge to sample the
+ * input signal after the clock edge.
+ */
+ u32 hold_time_ps;
+};
+
+/**
* struct drm_bridge - central DRM bridge control structure
* @dev: DRM device this bridge belongs to
* @encoder: encoder to which this bridge is connected
* @next: the next bridge in the encoder chain
* @of_node: device node pointer to the bridge
* @list: to keep track of all added bridges
+ * @timings: the timing specification for the bridge, if any (may
+ * be NULL)
* @funcs: control functions
* @driver_private: pointer to the bridge driver's internal context
*/
@@ -240,6 +272,7 @@ struct drm_bridge {
struct device_node *of_node;
#endif
struct list_head list;
+ const struct drm_bridge_timings *timings;
const struct drm_bridge_funcs *funcs;
void *driver_private;
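As a hedged illustration of the new timings field, a bridge driver with fixed input timing requirements could describe them once and point the bridge at the table before registration; the foo_* names and the numbers are made up for the example:

/* illustrative only: static input timings for a hypothetical bridge */
static const struct drm_bridge_timings foo_bridge_timings = {
	/* data is sampled on the rising edge of the pixel clock */
	.sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
	.setup_time_ps = 500,	/* data stable 500 ps before the edge */
	.hold_time_ps = 1500,	/* sampling finishes 1500 ps after it */
};

static int foo_bridge_probe(struct foo_bridge *foo)
{
	foo->bridge.funcs = &foo_bridge_funcs;
	foo->bridge.timings = &foo_bridge_timings;
	drm_bridge_add(&foo->bridge);
	return 0;
}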
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index beab0f0d0cfb..bfe1639df02d 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -38,6 +38,8 @@
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
+u64 drm_get_max_iomem(void);
+
static inline bool drm_arch_can_wc_memory(void)
{
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 03a59cbce621..44f04233e3db 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -26,6 +26,7 @@
#include <linux/ctype.h>
struct drm_crtc;
+struct drm_plane;
uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision);
@@ -37,4 +38,34 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
+/**
+ * drm_color_lut_size - calculate the number of entries in the LUT
+ * @blob: blob containing the LUT
+ *
+ * Returns:
+ * The number of entries in the color LUT stored in @blob.
+ */
+static inline int drm_color_lut_size(const struct drm_property_blob *blob)
+{
+ return blob->length / sizeof(struct drm_color_lut);
+}
+
+enum drm_color_encoding {
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_BT2020,
+ DRM_COLOR_ENCODING_MAX,
+};
+
+enum drm_color_range {
+ DRM_COLOR_YCBCR_LIMITED_RANGE,
+ DRM_COLOR_YCBCR_FULL_RANGE,
+ DRM_COLOR_RANGE_MAX,
+};
+
+int drm_plane_create_color_properties(struct drm_plane *plane,
+ u32 supported_encodings,
+ u32 supported_ranges,
+ enum drm_color_encoding default_encoding,
+ enum drm_color_range default_range);
#endif
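A sketch of how a driver might attach the new color properties to a YUV-capable plane, assuming the supported_encodings/supported_ranges arguments are bitmasks built with BIT() over the enums above; the chosen defaults and the foo_* name are illustrative:

/* illustrative: expose COLOR_ENCODING / COLOR_RANGE on a YUV plane */
static int foo_plane_init_color_props(struct drm_plane *plane)
{
	return drm_plane_create_color_properties(plane,
				BIT(DRM_COLOR_YCBCR_BT601) |
				BIT(DRM_COLOR_YCBCR_BT709),
				BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
				BIT(DRM_COLOR_YCBCR_FULL_RANGE),
				DRM_COLOR_YCBCR_BT709,
				DRM_COLOR_YCBCR_LIMITED_RANGE);
}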
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index ed38df4ac204..675cc3f8cf85 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -342,7 +342,11 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info,
/**
* struct drm_tv_connector_state - TV connector related states
* @subconnector: selected subconnector
- * @margins: left/right/top/bottom margins
+ * @margins: margins
+ * @margins.left: left margin
+ * @margins.right: right margin
+ * @margins.top: top margin
+ * @margins.bottom: bottom margin
* @mode: TV mode
* @brightness: brightness in percent
* @contrast: contrast in percent
@@ -419,6 +423,12 @@ struct drm_connector_state {
* upscaling, mostly used for built-in panels.
*/
unsigned int scaling_mode;
+
+ /**
+ * @content_protection: Connector property to request content
+ * protection. This is most commonly used for HDCP.
+ */
+ unsigned int content_protection;
};
/**
@@ -766,6 +776,7 @@ struct drm_cmdline_mode {
* @tile_h_size: horizontal size of this tile.
* @tile_v_size: vertical size of this tile.
* @scaling_mode_property: Optional atomic property to control the upscaling.
+ * @content_protection_property: Optional property to control content protection
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@@ -857,6 +868,12 @@ struct drm_connector {
struct drm_property *scaling_mode_property;
/**
+ * @content_protection_property: DRM ENUM property for content
+ * protection
+ */
+ struct drm_property *content_protection_property;
+
+ /**
* @path_blob_ptr:
*
* DRM blob property data for the DP MST path property.
@@ -1065,6 +1082,7 @@ const char *drm_get_dvi_i_subconnector_name(int val);
const char *drm_get_dvi_i_select_name(int val);
const char *drm_get_tv_subconnector_name(int val);
const char *drm_get_tv_select_name(int val);
+const char *drm_get_content_protection_name(int val);
int drm_mode_create_dvi_i_properties(struct drm_device *dev);
int drm_mode_create_tv_properties(struct drm_device *dev,
@@ -1073,6 +1091,8 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
int drm_mode_create_scaling_mode_property(struct drm_device *dev);
int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
+int drm_connector_attach_content_protection_property(
+ struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index da58a428c8d7..62903bae0221 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -75,6 +75,7 @@
#define DP_MAX_DOWNSPREAD 0x003
# define DP_MAX_DOWNSPREAD_0_5 (1 << 0)
# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+# define DP_TPS4_SUPPORTED (1 << 7)
#define DP_NORP 0x004
@@ -287,6 +288,7 @@
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
+# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
@@ -328,12 +330,20 @@
# define DP_DS_12BPC 2
# define DP_DS_16BPC 3
+/* DP Forward error Correction Registers */
+#define DP_FEC_CAPABILITY 0x090 /* 1.4 */
+# define DP_FEC_CAPABLE (1 << 0)
+# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
+# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
+# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
+
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
+# define DP_LINK_BW_8_1 0x1e /* 1.4 */
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
@@ -344,7 +354,9 @@
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
+# define DP_TRAINING_PATTERN_4 7 /* 1.4 */
# define DP_TRAINING_PATTERN_MASK 0x3
+# define DP_TRAINING_PATTERN_MASK_1_4 0xf
/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */
# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2)
@@ -441,6 +453,19 @@
#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
# define DP_PWR_NOT_NEEDED (1 << 0)
+#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */
+# define DP_FEC_READY (1 << 0)
+# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1)
+# define DP_FEC_ERR_COUNT_DIS (0 << 1)
+# define DP_FEC_UNCORR_BLK_ERROR_COUNT (1 << 1)
+# define DP_FEC_CORR_BLK_ERROR_COUNT (2 << 1)
+# define DP_FEC_BIT_ERROR_COUNT (3 << 1)
+# define DP_FEC_LANE_SELECT_MASK (3 << 4)
+# define DP_FEC_LANE_0_SELECT (0 << 4)
+# define DP_FEC_LANE_1_SELECT (1 << 4)
+# define DP_FEC_LANE_2_SELECT (2 << 4)
+# define DP_FEC_LANE_3_SELECT (3 << 4)
+
#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
@@ -616,6 +641,16 @@
#define DP_TEST_SINK 0x270
# define DP_TEST_SINK_START (1 << 0)
+#define DP_FEC_STATUS 0x280 /* 1.4 */
+# define DP_FEC_DECODE_EN_DETECTED (1 << 0)
+# define DP_FEC_DECODE_DIS_DETECTED (1 << 1)
+
+#define DP_FEC_ERROR_COUNT_LSB 0x0281 /* 1.4 */
+
+#define DP_FEC_ERROR_COUNT_MSB 0x0282 /* 1.4 */
+# define DP_FEC_ERROR_COUNT_MASK 0x7F
+# define DP_FEC_ERR_COUNT_VALID (1 << 7)
+
#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
# define DP_PAYLOAD_ACT_HANDLED (1 << 1)
@@ -836,6 +871,23 @@
#define DP_CEC_TX_MESSAGE_BUFFER 0x3020
#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10
+#define DP_AUX_HDCP_BKSV 0x68000
+#define DP_AUX_HDCP_RI_PRIME 0x68005
+#define DP_AUX_HDCP_AKSV 0x68007
+#define DP_AUX_HDCP_AN 0x6800C
+#define DP_AUX_HDCP_V_PRIME(h) (0x68014 + h * 4)
+#define DP_AUX_HDCP_BCAPS 0x68028
+# define DP_BCAPS_REPEATER_PRESENT BIT(1)
+# define DP_BCAPS_HDCP_CAPABLE BIT(0)
+#define DP_AUX_HDCP_BSTATUS 0x68029
+# define DP_BSTATUS_REAUTH_REQ BIT(3)
+# define DP_BSTATUS_LINK_FAILURE BIT(2)
+# define DP_BSTATUS_R0_PRIME_READY BIT(1)
+# define DP_BSTATUS_READY BIT(0)
+#define DP_AUX_HDCP_BINFO 0x6802A
+#define DP_AUX_HDCP_KSV_FIFO 0x6802C
+#define DP_AUX_HDCP_AINFO 0x6803B
+
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
@@ -971,6 +1023,20 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x14 &&
+ dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED;
+}
+
+static inline u8
+drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 :
+ DP_TRAINING_PATTERN_MASK;
+}
+
+static inline bool
drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
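As a hedged sketch, a DP driver could use the new TPS4 helper above when choosing the channel-equalization pattern during link training; the surrounding training code is assumed and only the selection is shown:

/* illustrative: pick the channel-EQ training pattern from sink caps */
static u8 foo_dp_select_chaneq_pattern(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	if (drm_dp_tps4_supported(dpcd))
		return DP_TRAINING_PATTERN_4;	/* DPCD 1.4 */
	if (drm_dp_tps3_supported(dpcd))
		return DP_TRAINING_PATTERN_3;
	return DP_TRAINING_PATTERN_2;
}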
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 6942e84b6edd..3e86408dac9f 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -38,6 +38,7 @@ struct drm_mode_fb_cmd2;
* @cpp: Number of bytes per pixel (per plane)
* @hsub: Horizontal chroma subsampling factor
* @vsub: Vertical chroma subsampling factor
+ * @has_alpha: Does the format embed an alpha component?
*/
struct drm_format_info {
u32 format;
@@ -46,6 +47,7 @@ struct drm_format_info {
u8 cpp[3];
u8 hsub;
u8 vsub;
+ bool has_alpha;
};
/**
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 9c55c2acaa2b..3583b98a1718 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -116,21 +116,6 @@ struct drm_gem_object {
int name;
/**
- * @read_domains:
- *
- * Read memory domains. These monitor which caches contain read/write data
- * related to the object. When transitioning from one set of domains
- * to another, the driver is called to ensure that caches are suitably
- * flushed and invalidated.
- */
- uint32_t read_domains;
-
- /**
- * @write_domain: Corresponding unique write memory domain.
- */
- uint32_t write_domain;
-
- /**
* @dma_buf:
*
* dma-buf associated with this GEM object.
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
new file mode 100644
index 000000000000..562fa7df2637
--- /dev/null
+++ b/include/drm/drm_hdcp.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#ifndef _DRM_HDCP_H_INCLUDED_
+#define _DRM_HDCP_H_INCLUDED_
+
+/* Period of hdcp checks (to ensure we're still authenticated) */
+#define DRM_HDCP_CHECK_PERIOD_MS (128 * 16)
+
+/* Shared lengths/masks between HDMI/DVI/DisplayPort */
+#define DRM_HDCP_AN_LEN 8
+#define DRM_HDCP_BSTATUS_LEN 2
+#define DRM_HDCP_KSV_LEN 5
+#define DRM_HDCP_RI_LEN 2
+#define DRM_HDCP_V_PRIME_PART_LEN 4
+#define DRM_HDCP_V_PRIME_NUM_PARTS 5
+#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x3f)
+#define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3))
+#define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7))
+
+/* Slave address for the HDCP registers in the receiver */
+#define DRM_HDCP_DDC_ADDR 0x3A
+
+/* HDCP register offsets for HDMI/DVI devices */
+#define DRM_HDCP_DDC_BKSV 0x00
+#define DRM_HDCP_DDC_RI_PRIME 0x08
+#define DRM_HDCP_DDC_AKSV 0x10
+#define DRM_HDCP_DDC_AN 0x18
+#define DRM_HDCP_DDC_V_PRIME(h) (0x20 + h * 4)
+#define DRM_HDCP_DDC_BCAPS 0x40
+#define DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT BIT(6)
+#define DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY BIT(5)
+#define DRM_HDCP_DDC_BSTATUS 0x41
+#define DRM_HDCP_DDC_KSV_FIFO 0x43
+
+#endif
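A hedged example of the shared macros above in use: after reading the two BSTATUS bytes from an HDMI/DVI repeater over DDC, the topology limits can be checked as below. The foo_ddc_read() helper is hypothetical; only the macro usage is taken from this header.

/* illustrative: parse repeater BSTATUS with the shared HDCP macros */
static int foo_hdcp_check_topology(struct i2c_adapter *ddc, int *num_downstream)
{
	u8 bstatus[DRM_HDCP_BSTATUS_LEN];
	int ret;

	/* driver-specific DDC read from DRM_HDCP_DDC_ADDR (hypothetical) */
	ret = foo_ddc_read(ddc, DRM_HDCP_DDC_BSTATUS, bstatus, sizeof(bstatus));
	if (ret)
		return ret;

	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1]))
		return -EPERM;

	*num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	return 0;
}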
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 2cb6f02df64a..7569f22ffef6 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -36,6 +36,7 @@ struct drm_device;
struct drm_atomic_state;
struct drm_mode_fb_cmd2;
struct drm_format_info;
+struct drm_display_mode;
/**
* struct drm_mode_config_funcs - basic driver provided mode setting functions
@@ -102,6 +103,17 @@ struct drm_mode_config_funcs {
void (*output_poll_changed)(struct drm_device *dev);
/**
+ * @mode_valid:
+ *
+ * Device specific validation of display modes. Can be used to reject
+ * modes that can never be supported. Only device wide constraints can
+ * be checked here. crtc/encoder/bridge/connector specific constraints
+ * should be checked in the .mode_valid() hook for each specific object.
+ */
+ enum drm_mode_status (*mode_valid)(struct drm_device *dev,
+ const struct drm_display_mode *mode);
+
+ /**
* @atomic_check:
*
* This is the only hook to validate an atomic modeset update. This
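A minimal sketch of the new device-wide hook: a driver whose scanout hardware can never fetch pixel clocks above some limit could filter such modes here instead of in every crtc/encoder; the foo_* names and the 148.5 MHz limit are invented for the example:

/* illustrative: reject modes the whole device can never support */
static enum drm_mode_status
foo_mode_config_mode_valid(struct drm_device *dev,
			   const struct drm_display_mode *mode)
{
	if (mode->clock > 148500)	/* kHz */
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create	= drm_gem_fb_create,
	.mode_valid	= foo_mode_config_mode_valid,
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};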
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 7ba3913f30b5..c34a3e8030e1 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -120,30 +120,6 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
void drm_mode_object_get(struct drm_mode_object *obj);
void drm_mode_object_put(struct drm_mode_object *obj);
-/**
- * drm_mode_object_reference - acquire a mode object reference
- * @obj: DRM mode object
- *
- * This is a compatibility alias for drm_mode_object_get() and should not be
- * used by new code.
- */
-static inline void drm_mode_object_reference(struct drm_mode_object *obj)
-{
- drm_mode_object_get(obj);
-}
-
-/**
- * drm_mode_object_unreference - release a mode object reference
- * @obj: DRM mode object
- *
- * This is a compatibility alias for drm_mode_object_put() and should not be
- * used by new code.
- */
-static inline void drm_mode_object_unreference(struct drm_mode_object *obj)
-{
- drm_mode_object_put(obj);
-}
-
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 9f3421c8efcd..0d310beae6af 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -131,9 +131,6 @@ enum drm_mode_status {
MODE_ERROR = -1
};
-#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
- DRM_MODE_TYPE_CRTC_C)
-
#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
.name = nm, .status = 0, .type = (t), .clock = (c), \
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
@@ -245,26 +242,25 @@ struct drm_display_mode {
* A bitmask of flags, mostly about the source of a mode. Possible flags
* are:
*
- * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, effectively
- * unused.
* - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native
* resolution of an LCD panel. There should only be one preferred
* mode per connector at any given time.
* - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of
* them really. Drivers must set this bit for all modes they create
* and expose to userspace.
+ * - DRM_MODE_TYPE_USERDEF: Mode defined via kernel command line
*
* Plus a big list of flags which shouldn't be used at all, but are
- * still around since these flags are also used in the userspace ABI:
+ * still around since these flags are also used in the userspace ABI.
+ * We no longer accept modes with these types though:
*
+ * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused.
+ * Use DRM_MODE_TYPE_DRIVER instead.
* - DRM_MODE_TYPE_DEFAULT: Again a leftover, use
* DRM_MODE_TYPE_PREFERRED instead.
* - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers
* which are stuck around for hysterical raisins only. No one has an
* idea what they were meant for. Don't use.
- * - DRM_MODE_TYPE_USERDEF: Mode defined by userspace, again a vestige
- * from older kms designs where userspace had to first add a custom
- * mode to the kernel's mode list before it could use it. Don't use.
*/
unsigned int type;
@@ -299,8 +295,8 @@ struct drm_display_mode {
* - DRM_MODE_FLAG_PCSYNC: composite sync is active high.
* - DRM_MODE_FLAG_NCSYNC: composite sync is active low.
* - DRM_MODE_FLAG_HSKEW: hskew provided (not used?).
- * - DRM_MODE_FLAG_BCAST: not used?
- * - DRM_MODE_FLAG_PIXMUX: not used?
+ * - DRM_MODE_FLAG_BCAST: <deprecated>
+ * - DRM_MODE_FLAG_PIXMUX: <deprecated>
* - DRM_MODE_FLAG_DBLCLK: double-clocked mode.
* - DRM_MODE_FLAG_CLKDIV2: half-clocked mode.
*
@@ -448,7 +444,8 @@ struct drm_display_mode *drm_mode_create(struct drm_device *dev);
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
const struct drm_display_mode *in);
-int drm_mode_convert_umode(struct drm_display_mode *out,
+int drm_mode_convert_umode(struct drm_device *dev,
+ struct drm_display_mode *out,
const struct drm_mode_modeinfo *in);
void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
@@ -501,7 +498,8 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
/* for use by the crtc helper probe functions */
-enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode);
+enum drm_mode_status drm_mode_validate_driver(struct drm_device *dev,
+ const struct drm_display_mode *mode);
enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
int maxX, int maxY);
enum drm_mode_status
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8185e3468a23..f7bf4a48b1c3 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -26,6 +26,7 @@
#include <linux/list.h>
#include <linux/ctype.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_color_mgmt.h>
struct drm_crtc;
struct drm_printer;
@@ -112,6 +113,20 @@ struct drm_plane_state {
unsigned int zpos;
unsigned int normalized_zpos;
+ /**
+ * @color_encoding:
+ *
+ * Color encoding for non RGB formats
+ */
+ enum drm_color_encoding color_encoding;
+
+ /**
+ * @color_range:
+ *
+ * Color range for non RGB formats
+ */
+ enum drm_color_range color_range;
+
/* Clipped coordinates */
struct drm_rect src, dst;
@@ -558,6 +573,23 @@ struct drm_plane {
struct drm_property *zpos_property;
struct drm_property *rotation_property;
+
+ /**
+ * @color_encoding_property:
+ *
+ * Optional "COLOR_ENCODING" enum property for specifying
+ * color encoding for non RGB formats.
+ * See drm_plane_create_color_properties().
+ */
+ struct drm_property *color_encoding_property;
+ /**
+ * @color_range_property:
+ *
+ * Optional "COLOR_RANGE" enum property for specifying
+ * color range for non RGB formats.
+ * See drm_plane_create_color_properties().
+ */
+ struct drm_property *color_range_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 8aa49c0ecd4d..28d7ce620729 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -43,7 +43,6 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_framebuffer *fb,
struct drm_rect *src,
struct drm_rect *dest,
- const struct drm_rect *clip,
unsigned int rotation,
int min_scale,
int max_scale,
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 9cd9e36f77b5..4d5f5d6cf6a6 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -54,6 +54,9 @@ struct device;
struct dma_buf_export_info;
struct dma_buf;
+struct dma_buf_attachment;
+
+enum dma_data_direction;
struct drm_device;
struct drm_gem_object;
@@ -79,6 +82,25 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+ struct dma_buf_attachment *attach);
+void drm_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach);
+struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir);
+void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
+void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num);
+void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr);
+void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
+void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
+ void *addr);
+int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_pages);
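The newly exported helpers above let a driver build its dma_buf_ops table from generic GEM PRIME code; a hedged sketch follows (the field names assume the dma_buf_ops layout of this kernel generation and may differ in other versions):

/* illustrative: generic GEM-backed dma-buf ops built from the exports */
static const struct dma_buf_ops foo_dmabuf_ops = {
	.attach		= drm_gem_map_attach,
	.detach		= drm_gem_map_detach,
	.map_dma_buf	= drm_gem_map_dma_buf,
	.unmap_dma_buf	= drm_gem_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map		= drm_gem_dmabuf_kmap,
	.map_atomic	= drm_gem_dmabuf_kmap_atomic,
	.unmap		= drm_gem_dmabuf_kunmap,
	.unmap_atomic	= drm_gem_dmabuf_kunmap_atomic,
	.mmap		= drm_gem_dmabuf_mmap,
	.vmap		= drm_gem_dmabuf_vmap,
	.vunmap		= drm_gem_dmabuf_vunmap,
};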
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 2a4a42e59a47..e1a46e9991cc 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -196,21 +196,22 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
#define DRM_UT_STATE 0x40
#define DRM_UT_LEASE 0x80
-__printf(6, 7)
+__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
- unsigned int category, const char *function_name,
- const char *prefix, const char *format, ...);
+ const char *format, ...);
__printf(3, 4)
-void drm_printk(const char *level, unsigned int category,
- const char *format, ...);
+void drm_dev_dbg(const struct device *dev, unsigned int category,
+ const char *format, ...);
+
+__printf(2, 3)
+void drm_dbg(unsigned int category, const char *format, ...);
+__printf(1, 2)
+void drm_err(const char *format, ...);
/* Macros to make printk easier */
#define _DRM_PRINTK(once, level, fmt, ...) \
- do { \
- printk##once(KERN_##level "[" DRM_NAME "] " fmt, \
- ##__VA_ARGS__); \
- } while (0)
+ printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
#define DRM_INFO(fmt, ...) \
_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
@@ -233,10 +234,9 @@ void drm_printk(const char *level, unsigned int category,
* @fmt: printf() like format string.
*/
#define DRM_DEV_ERROR(dev, fmt, ...) \
- drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\
- fmt, ##__VA_ARGS__)
+ drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__)
#define DRM_ERROR(fmt, ...) \
- drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__)
+ drm_err(fmt, ##__VA_ARGS__)
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
@@ -257,8 +257,7 @@ void drm_printk(const char *level, unsigned int category,
DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
#define DRM_DEV_INFO(dev, fmt, ...) \
- drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \
- ##__VA_ARGS__)
+ drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
({ \
@@ -275,53 +274,46 @@ void drm_printk(const char *level, unsigned int category,
* @dev: device pointer
* @fmt: printf() like format string.
*/
-#define DRM_DEV_DEBUG(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \
- ##args)
+#define DRM_DEV_DEBUG(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
#define DRM_DEBUG(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+ drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \
- fmt, ##args)
+#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_DRIVER(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+ drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \
- ##args)
-#define DRM_DEBUG_KMS(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_KMS(fmt, ...) \
+ drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \
- fmt, ##args)
+#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_PRIME(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+ drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \
- fmt, ##args)
+#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_ATOMIC(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+ drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \
- ##args)
-#define DRM_DEBUG_VBL(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_VBL(fmt, ...) \
+ drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_LEASE(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+ drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
-#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \
+#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
if (__ratelimit(&_rs)) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \
- __func__, "", fmt, ##args); \
+ drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__); \
})
/**
@@ -330,21 +322,28 @@ void drm_printk(const char *level, unsigned int category,
* @dev: device pointer
* @fmt: printf() like format string.
*/
-#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \
- DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
-#define DRM_DEBUG_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
-#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args)
-#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args)
-#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args)
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args)
-#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args)
-#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
#endif /* DRM_PRINT_H_ */
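Driver-side usage of the reworked logging entry points stays the same; two hedged examples of the device-aware, category-based forms (drm, mode, and ret are assumed driver locals):

/* illustrative: category-based debug and device-tagged error output */
DRM_DEV_DEBUG_KMS(drm->dev, "mode %s rejected\n", mode->name);
DRM_DEV_ERROR(drm->dev, "link training failed: %d\n", ret);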
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 8a522b4bed40..d1423c7f3c73 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -209,7 +209,7 @@ struct drm_property_blob {
struct list_head head_global;
struct list_head head_file;
size_t length;
- unsigned char data[];
+ void *data;
};
struct drm_prop_enum_list {
@@ -237,27 +237,29 @@ static inline bool drm_property_type_is(struct drm_property *property,
return property->flags & type;
}
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
- const char *name, int num_values);
-struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
- const char *name,
+struct drm_property *drm_property_create(struct drm_device *dev,
+ u32 flags, const char *name,
+ int num_values);
+struct drm_property *drm_property_create_enum(struct drm_device *dev,
+ u32 flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
- int flags, const char *name,
+ u32 flags, const char *name,
const struct drm_prop_enum_list *props,
int num_props,
uint64_t supported_bits);
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
- const char *name,
+struct drm_property *drm_property_create_range(struct drm_device *dev,
+ u32 flags, const char *name,
uint64_t min, uint64_t max);
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
- int flags, const char *name,
+ u32 flags, const char *name,
int64_t min, int64_t max);
struct drm_property *drm_property_create_object(struct drm_device *dev,
- int flags, const char *name, uint32_t type);
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
- const char *name);
+ u32 flags, const char *name,
+ uint32_t type);
+struct drm_property *drm_property_create_bool(struct drm_device *dev,
+ u32 flags, const char *name);
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
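Since the constructors above now take the flags as u32, here is a short, hedged sketch of a caller; the property name and range are illustrative only.

#include <drm/drm_property.h>

/* Hypothetical sketch: creating a range property with the new u32 flags type. */
static struct drm_property *
example_create_brightness_prop(struct drm_device *dev)
{
	u32 flags = 0;	/* e.g. DRM_MODE_PROP_ATOMIC; no longer a plain int */

	return drm_property_create_range(dev, flags, "example-brightness",
					 0, 255);
}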
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 6d9adbb46293..1b4e352143fd 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -22,6 +22,41 @@ struct drm_simple_display_pipe;
*/
struct drm_simple_display_pipe_funcs {
/**
+ * @mode_valid:
+ *
+ * This callback is used to check if a specific mode is valid in the
+ * crtc used in this simple display pipe. This should be implemented
+	 * if the display pipe has some sort of restriction on the modes
+	 * it can display. For example, a given display pipe may be responsible
+	 * for setting a clock value. If the clock cannot produce all the values
+	 * for the available modes, then this callback can be used to restrict
+	 * the number of modes to only the ones that can be displayed. Another
+ * reason can be bandwidth mitigation: the memory port on the display
+ * controller can have bandwidth limitations not allowing pixel data
+ * to be fetched at any rate.
+ *
+ * This hook is used by the probe helpers to filter the mode list in
+ * drm_helper_probe_single_connector_modes(), and it is used by the
+ * atomic helpers to validate modes supplied by userspace in
+ * drm_atomic_helper_check_modeset().
+ *
+ * This function is optional.
+ *
+ * NOTE:
+ *
+	 * Since this function is called both from the check phase of an atomic
+	 * commit and from the mode validation in the probe paths, it is not
+	 * allowed to look at anything other than the passed-in mode, and it
+	 * must validate the mode against configuration-invariant hardware
+	 * constraints.
+ *
+ * RETURNS:
+ *
+ * drm_mode_status Enum
+ */
+ enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+
+ /**
* @enable:
*
* This function should be used to enable the pipeline.
@@ -93,6 +128,24 @@ struct drm_simple_display_pipe_funcs {
*/
void (*cleanup_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
+
+ /**
+ * @enable_vblank:
+ *
+ * Optional, called by &drm_crtc_funcs.enable_vblank. Please read
+ * the documentation for the &drm_crtc_funcs.enable_vblank hook for
+ * more details.
+ */
+ int (*enable_vblank)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @disable_vblank:
+ *
+ * Optional, called by &drm_crtc_funcs.disable_vblank. Please read
+ * the documentation for the &drm_crtc_funcs.disable_vblank hook for
+ * more details.
+ */
+ void (*disable_vblank)(struct drm_simple_display_pipe *pipe);
};
/**
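A minimal sketch of the new @mode_valid hook documented above, assuming a made-up pixel-clock limit; as the kerneldoc notes, only configuration-invariant checks belong here.

#include <drm/drm_modes.h>
#include <drm/drm_simple_kms_helper.h>

#define EXAMPLE_MAX_PIXCLK_KHZ	150000	/* hypothetical hardware limit */

static enum drm_mode_status
example_pipe_mode_valid(struct drm_crtc *crtc,
			const struct drm_display_mode *mode)
{
	if (mode->clock > EXAMPLE_MAX_PIXCLK_KHZ)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
	.mode_valid = example_pipe_mode_valid,
	/* .enable, .disable, .update, ... omitted */
};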
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 848b463a0af5..d25a9603ab57 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -55,8 +55,24 @@ struct drm_pending_vblank_event {
* @event: Actual event which will be sent to userspace.
*/
union {
+ /**
+ * @event.base: DRM event base class.
+ */
struct drm_event base;
+
+ /**
+ * @event.vbl:
+ *
+ * Event payload for vblank events, requested through
+ * either the MODE_PAGE_FLIP or MODE_ATOMIC IOCTL. Also
+ * generated by the legacy WAIT_VBLANK IOCTL, but new userspace
+ * should use MODE_QUEUE_SEQUENCE and &event.seq instead.
+ */
struct drm_event_vblank vbl;
+
+ /**
+		 * @event.seq: Event payload for the MODE_QUEUE_SEQUENCE IOCTL.
+ */
struct drm_event_crtc_sequence seq;
} event;
};
@@ -179,7 +195,9 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
-u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
+u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
+void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
+void drm_crtc_vblank_restore(struct drm_crtc *crtc);
bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe, int *max_error,
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 545c6e0fea7d..346b1f5cb180 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -26,9 +26,8 @@
/* MAX_PORT is the number of port
* It must be sync with I915_MAX_PORTS defined i915_drv.h
- * 5 should be enough as only HSW, BDW, SKL need such fix.
*/
-#define MAX_PORTS 5
+#define MAX_PORTS 6
/**
* struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 5db0458dd832..70f0c2535b87 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -414,24 +414,33 @@
INTEL_CFL_U_GT2_IDS(info), \
INTEL_CFL_U_GT3_IDS(info)
-/* CNL U 2+2 */
-#define INTEL_CNL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x5A52, info), \
- INTEL_VGA_DEVICE(0x5A5A, info), \
- INTEL_VGA_DEVICE(0x5A42, info), \
- INTEL_VGA_DEVICE(0x5A4A, info)
-
-/* CNL Y 2+2 */
-#define INTEL_CNL_Y_GT2_IDS(info) \
+/* CNL */
+#define INTEL_CNL_IDS(info) \
INTEL_VGA_DEVICE(0x5A51, info), \
INTEL_VGA_DEVICE(0x5A59, info), \
INTEL_VGA_DEVICE(0x5A41, info), \
INTEL_VGA_DEVICE(0x5A49, info), \
- INTEL_VGA_DEVICE(0x5A71, info), \
- INTEL_VGA_DEVICE(0x5A79, info)
-
-#define INTEL_CNL_IDS(info) \
- INTEL_CNL_U_GT2_IDS(info), \
- INTEL_CNL_Y_GT2_IDS(info)
+ INTEL_VGA_DEVICE(0x5A52, info), \
+ INTEL_VGA_DEVICE(0x5A5A, info), \
+ INTEL_VGA_DEVICE(0x5A42, info), \
+ INTEL_VGA_DEVICE(0x5A4A, info), \
+ INTEL_VGA_DEVICE(0x5A50, info), \
+ INTEL_VGA_DEVICE(0x5A40, info), \
+ INTEL_VGA_DEVICE(0x5A54, info), \
+ INTEL_VGA_DEVICE(0x5A5C, info), \
+ INTEL_VGA_DEVICE(0x5A44, info), \
+ INTEL_VGA_DEVICE(0x5A4C, info)
+
+/* ICL */
+#define INTEL_ICL_11_IDS(info) \
+ INTEL_VGA_DEVICE(0x8A50, info), \
+ INTEL_VGA_DEVICE(0x8A51, info), \
+ INTEL_VGA_DEVICE(0x8A5C, info), \
+ INTEL_VGA_DEVICE(0x8A5D, info), \
+ INTEL_VGA_DEVICE(0x8A52, info), \
+ INTEL_VGA_DEVICE(0x8A5A, info), \
+ INTEL_VGA_DEVICE(0x8A5B, info), \
+ INTEL_VGA_DEVICE(0x8A71, info), \
+ INTEL_VGA_DEVICE(0x8A70, info)
#endif /* _I915_PCIIDS_H */
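For context, these ID macros are normally expanded into a PCI device table; below is a hedged sketch with made-up per-platform info structures standing in for the driver's real ones.

#include <linux/pci.h>
#include <drm/i915_pciids.h>

/* Hypothetical per-platform data; the real driver uses its own info structs. */
struct example_device_info {
	unsigned int gen;
};

static const struct example_device_info example_cnl_info = { .gen = 10 };
static const struct example_device_info example_icl_info = { .gen = 11 };

static const struct pci_device_id example_pciidlist[] = {
	INTEL_CNL_IDS(&example_cnl_info),
	INTEL_ICL_11_IDS(&example_icl_info),
	{ }
};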
diff --git a/include/drm/tinydrm/ili9341.h b/include/drm/tinydrm/ili9341.h
deleted file mode 100644
index 807a09f43cad..000000000000
--- a/include/drm/tinydrm/ili9341.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * ILI9341 LCD controller
- *
- * Copyright 2016 Noralf Trønnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LINUX_ILI9341_H
-#define __LINUX_ILI9341_H
-
-#define ILI9341_FRMCTR1 0xb1
-#define ILI9341_FRMCTR2 0xb2
-#define ILI9341_FRMCTR3 0xb3
-#define ILI9341_INVTR 0xb4
-#define ILI9341_PRCTR 0xb5
-#define ILI9341_DISCTRL 0xb6
-#define ILI9341_ETMOD 0xb7
-
-#define ILI9341_PWCTRL1 0xc0
-#define ILI9341_PWCTRL2 0xc1
-#define ILI9341_VMCTRL1 0xc5
-#define ILI9341_VMCTRL2 0xc7
-#define ILI9341_PWCTRLA 0xcb
-#define ILI9341_PWCTRLB 0xcf
-
-#define ILI9341_RDID1 0xda
-#define ILI9341_RDID2 0xdb
-#define ILI9341_RDID3 0xdc
-#define ILI9341_RDID4 0xd3
-
-#define ILI9341_PGAMCTRL 0xe0
-#define ILI9341_NGAMCTRL 0xe1
-#define ILI9341_DGAMCTRL1 0xe2
-#define ILI9341_DGAMCTRL2 0xe3
-#define ILI9341_DTCTRLA 0xe8
-#define ILI9341_DTCTRLB 0xea
-#define ILI9341_PWRSEQ 0xed
-
-#define ILI9341_EN3GAM 0xf2
-#define ILI9341_IFCTRL 0xf6
-#define ILI9341_PUMPCTRL 0xf7
-
-#define ILI9341_MADCTL_MH BIT(2)
-#define ILI9341_MADCTL_BGR BIT(3)
-#define ILI9341_MADCTL_ML BIT(4)
-#define ILI9341_MADCTL_MV BIT(5)
-#define ILI9341_MADCTL_MX BIT(6)
-#define ILI9341_MADCTL_MY BIT(7)
-
-#endif /* __LINUX_ILI9341_H */
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
index 5d0e82b36eaf..44e824af2ef6 100644
--- a/include/drm/tinydrm/mipi-dbi.h
+++ b/include/drm/tinydrm/mipi-dbi.h
@@ -67,11 +67,12 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
const struct drm_simple_display_pipe_funcs *pipe_funcs,
struct drm_driver *driver,
const struct drm_display_mode *mode, unsigned int rotation);
-void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state);
+void mipi_dbi_enable_flush(struct mipi_dbi *mipi);
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
void mipi_dbi_hw_reset(struct mipi_dbi *mipi);
bool mipi_dbi_display_is_on(struct mipi_dbi *mipi);
+int mipi_dbi_poweron_reset(struct mipi_dbi *mipi);
+int mipi_dbi_poweron_conditional_reset(struct mipi_dbi *mipi);
u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
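A hedged sketch of how a panel's enable path might combine the new helpers, assuming mipi_dbi_poweron_conditional_reset() returns 1 when the controller was already initialised and 0 after a real reset; the init command below is only an example.

#include <linux/delay.h>
#include <video/mipi_display.h>
#include <drm/tinydrm/mipi-dbi.h>

static void example_panel_enable(struct mipi_dbi *mipi)
{
	int ret;

	ret = mipi_dbi_poweron_conditional_reset(mipi);
	if (ret < 0)
		return;
	if (ret == 0) {
		/* the controller was actually reset: resend the init sequence */
		mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
		msleep(120);
	}

	mipi_dbi_enable_flush(mipi);
}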
diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h
index d554ded60ee9..0a4ddbc04c60 100644
--- a/include/drm/tinydrm/tinydrm-helpers.h
+++ b/include/drm/tinydrm/tinydrm-helpers.h
@@ -46,10 +46,6 @@ void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
struct drm_clip_rect *clip);
-struct backlight_device *tinydrm_of_find_backlight(struct device *dev);
-int tinydrm_enable_backlight(struct backlight_device *backlight);
-int tinydrm_disable_backlight(struct backlight_device *backlight);
-
size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len);
bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw);
int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 2cd025c2abe7..c67977aa1a0e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -41,6 +41,8 @@
#include <linux/bitmap.h>
#include <linux/reservation.h>
+struct ttm_bo_global;
+
struct ttm_bo_device;
struct drm_mm_node;
@@ -169,7 +171,6 @@ struct ttm_buffer_object {
* Members constant at init.
*/
- struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
@@ -263,8 +264,8 @@ struct ttm_bo_kmap_obj {
*
* @interruptible: Sleep interruptible if sleeping.
* @no_wait_gpu: Return immediately if the GPU is busy.
- * @allow_reserved_eviction: Allow eviction of reserved BOs.
* @resv: Reservation object to allow reserved evictions with.
+ * @flags: Combination of the TTM_OPT_FLAG_* flags defined below.
*
* Context for TTM operations like changing buffer placement or general memory
* allocation.
@@ -272,11 +273,16 @@ struct ttm_bo_kmap_obj {
struct ttm_operation_ctx {
bool interruptible;
bool no_wait_gpu;
- bool allow_reserved_eviction;
struct reservation_object *resv;
uint64_t bytes_moved;
+ uint32_t flags;
};
+/* Allow eviction of reserved BOs */
+#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1
+/* When serving a page fault or suspend, allow allocation anyway */
+#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
+
/**
* ttm_bo_reference - reference a struct ttm_buffer_object
*
@@ -461,11 +467,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
* @ctx: TTM operation context for memory allocation.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object. Typically, this would
- * point to the shmem object backing a GEM object if TTM is used to back a
- * GEM user interface.
* @acc_size: Accounted size for this object.
* @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
@@ -498,7 +499,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_placement *placement,
uint32_t page_alignment,
struct ttm_operation_ctx *ctx,
- struct file *persistent_swap_storage,
size_t acc_size,
struct sg_table *sg,
struct reservation_object *resv,
@@ -515,7 +515,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep to wait for GPU resources,
* sleep interruptible.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
@@ -545,8 +544,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
unsigned long size, enum ttm_bo_type type,
struct ttm_placement *placement,
- uint32_t page_alignment, bool interrubtible,
- struct file *persistent_swap_storage, size_t acc_size,
+ uint32_t page_alignment, bool interrubtible, size_t acc_size,
struct sg_table *sg, struct reservation_object *resv,
void (*destroy) (struct ttm_buffer_object *));
@@ -560,11 +558,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep while waiting for GPU resources,
* sleep interruptible.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object. Typically, this would
- * point to the shmem object backing a GEM object if TTM is used to back a
- * GEM user interface.
* @p_bo: On successful completion *p_bo points to the created object.
*
* This function allocates a ttm_buffer_object, and then calls ttm_bo_init
@@ -577,7 +570,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
enum ttm_bo_type type, struct ttm_placement *placement,
uint32_t page_alignment, bool interruptible,
- struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo);
/**
@@ -717,6 +709,10 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev);
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
+
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
+
/**
* ttm_bo_io
*
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 94064b126e8e..3234cc322e70 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -42,113 +42,10 @@
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"
+#include "ttm_tt.h"
#define TTM_MAX_BO_PRIORITY 4U
-struct ttm_backend_func {
- /**
- * struct ttm_backend_func member bind
- *
- * @ttm: Pointer to a struct ttm_tt.
- * @bo_mem: Pointer to a struct ttm_mem_reg describing the
- * memory type and location for binding.
- *
- * Bind the backend pages into the aperture in the location
- * indicated by @bo_mem. This function should be able to handle
- * differences between aperture and system page sizes.
- */
- int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
-
- /**
- * struct ttm_backend_func member unbind
- *
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Unbind previously bound backend pages. This function should be
- * able to handle differences between aperture and system page sizes.
- */
- int (*unbind) (struct ttm_tt *ttm);
-
- /**
- * struct ttm_backend_func member destroy
- *
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Destroy the backend. This will be call back from ttm_tt_destroy so
- * don't call ttm_tt_destroy from the callback or infinite loop.
- */
- void (*destroy) (struct ttm_tt *ttm);
-};
-
-#define TTM_PAGE_FLAG_WRITE (1 << 3)
-#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
-#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_DMA32 (1 << 7)
-#define TTM_PAGE_FLAG_SG (1 << 8)
-
-enum ttm_caching_state {
- tt_uncached,
- tt_wc,
- tt_cached
-};
-
-/**
- * struct ttm_tt
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
- * pointer.
- * @pages: Array of pages backing the data.
- * @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
- * @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-
-struct ttm_tt {
- struct ttm_bo_device *bdev;
- struct ttm_backend_func *func;
- struct page *dummy_read_page;
- struct page **pages;
- uint32_t page_flags;
- unsigned long num_pages;
- struct sg_table *sg; /* for SG objects via dma-buf */
- struct ttm_bo_global *glob;
- struct file *swap_storage;
- enum ttm_caching_state caching_state;
- enum {
- tt_bound,
- tt_unbound,
- tt_unpopulated,
- } state;
-};
-
-/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_dma_tt {
- struct ttm_tt ttm;
- dma_addr_t *dma_address;
- struct list_head pages_list;
-};
-
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
@@ -328,20 +225,16 @@ struct ttm_bo_driver {
/**
* ttm_tt_create
*
- * @bdev: pointer to a struct ttm_bo_device:
- * @size: Size of the data needed backing.
+ * @bo: The buffer object to create the ttm for.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- * @dummy_read_page: See struct ttm_bo_device.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
* Returns:
* NULL: Out of memory.
*/
- struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
- unsigned long size,
- uint32_t page_flags,
- struct page *dummy_read_page);
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
+ uint32_t page_flags);
/**
* ttm_tt_populate
@@ -556,6 +449,7 @@ struct ttm_bo_global {
* @dev_mapping: A pointer to the struct address_space representing the
* device address space.
* @wq: Work queue structure for the delayed delete workqueue.
+ * @no_retry: Don't retry allocation if it fails
*
*/
@@ -592,6 +486,8 @@ struct ttm_bo_device {
struct delayed_work wq;
bool need_dma32;
+
+ bool no_retry;
};
/**
@@ -611,100 +507,6 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
return *old;
}
-/**
- * ttm_tt_init
- *
- * @ttm: The struct ttm_tt.
- * @bdev: pointer to a struct ttm_bo_device:
- * @size: Size of the data needed backing.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- * @dummy_read_page: See struct ttm_bo_device.
- *
- * Create a struct ttm_tt to back data with system memory pages.
- * No pages are actually allocated.
- * Returns:
- * NULL: Out of memory.
- */
-int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page);
-
-/**
- * ttm_tt_fini
- *
- * @ttm: the ttm_tt structure.
- *
- * Free memory of ttm_tt structure
- */
-void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
-
-/**
- * ttm_ttm_bind:
- *
- * @ttm: The struct ttm_tt containing backing pages.
- * @bo_mem: The struct ttm_mem_reg identifying the binding location.
- *
- * Bind the pages of @ttm to an aperture location identified by @bo_mem
- */
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
- struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_ttm_destroy:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind, unpopulate and destroy common struct ttm_tt.
- */
-void ttm_tt_destroy(struct ttm_tt *ttm);
-
-/**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind a struct ttm_tt.
- */
-void ttm_tt_unbind(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_swapin:
- *
- * @ttm: The struct ttm_tt.
- *
- * Swap in a previously swap out ttm_tt.
- */
-int ttm_tt_swapin(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_set_placement_caching:
- *
- * @ttm A struct ttm_tt the backing pages of which will change caching policy.
- * @placement: Flag indicating the desired caching policy.
- *
- * This function will change caching policy of any default kernel mappings of
- * the pages backing @ttm. If changing from cached to uncached or
- * write-combined,
- * all CPU caches will first be flushed to make sure the data of the pages
- * hit RAM. This function may be very costly as it involves global TLB
- * and cache flushes and potential page splitting / combining.
- */
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
-
-/**
- * ttm_tt_unpopulate - free pages from a ttm
- *
- * @ttm: Pointer to the ttm_tt structure
- *
- * Calls the driver method to free all pages from a ttm
- */
-void ttm_tt_unpopulate(struct ttm_tt *ttm);
-
/*
* ttm_bo.c
*/
@@ -943,9 +745,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bo->glob->lru_lock);
+ spin_lock(&bo->bdev->glob->lru_lock);
ttm_bo_add_to_lru(bo);
- spin_unlock(&bo->glob->lru_lock);
+ spin_unlock(&bo->bdev->glob->lru_lock);
}
reservation_object_unlock(bo->resv);
}
@@ -1046,6 +848,15 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
/**
+ * ttm_bo_pipeline_gutting.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Pipelined gutting of a BO's backing store.
+ */
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
+
+/**
* ttm_io_prot
*
* @c_state: Caching state.
@@ -1058,29 +869,4 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
-#if IS_ENABLED(CONFIG_AGP)
-#include <linux/agp_backend.h>
-
-/**
- * ttm_agp_tt_create
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @bridge: The agp bridge this device is sitting on.
- * @size: Size of the data needed backing.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- * @dummy_read_page: See struct ttm_bo_device.
- *
- *
- * Create a TTM backend that uses the indicated AGP bridge as an aperture
- * for TT memory. This function uses the linux agpgart interface to
- * bind and unbind memory backing a ttm_tt.
- */
-struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
- struct agp_bridge_data *bridge,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page);
-int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
-#endif
-
#endif
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 8936285b6543..737b5fed8003 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -49,6 +49,8 @@
* @work: The workqueue callback for the shrink queue.
* @lock: Lock to protect the @shrink - and the memory accounting members,
* that is, essentially the whole structure with some exceptions.
+ * @lower_mem_limit: lower limit of the available swap space and of the
+ * available system memory.
* @zones: Array of pointers to accounting zones.
* @num_zones: Number of populated entries in the @zones array.
* @zone_kernel: Pointer to the kernel zone.
@@ -67,6 +69,7 @@ struct ttm_mem_global {
struct workqueue_struct *swap_queue;
struct work_struct work;
spinlock_t lock;
+ uint64_t lower_mem_limit;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones;
struct ttm_mem_zone *zone_kernel;
@@ -90,4 +93,6 @@ extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
struct page *page, uint64_t size);
extern size_t ttm_round_pot(size_t size);
extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
+extern bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
+ uint64_t num_pages, struct ttm_operation_ctx *ctx);
#endif
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
new file mode 100644
index 000000000000..c0e928abf592
--- /dev/null
+++ b/include/drm/ttm/ttm_tt.h
@@ -0,0 +1,272 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _TTM_TT_H_
+#define _TTM_TT_H_
+
+#include <linux/types.h>
+
+struct ttm_tt;
+struct ttm_mem_reg;
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+
+#define TTM_PAGE_FLAG_WRITE (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
+#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
+#define TTM_PAGE_FLAG_DMA32 (1 << 7)
+#define TTM_PAGE_FLAG_SG (1 << 8)
+#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
+
+enum ttm_caching_state {
+ tt_uncached,
+ tt_wc,
+ tt_cached
+};
+
+struct ttm_backend_func {
+ /**
+ * struct ttm_backend_func member bind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+ * memory type and location for binding.
+ *
+ * Bind the backend pages into the aperture in the location
+ * indicated by @bo_mem. This function should be able to handle
+ * differences between aperture and system page sizes.
+ */
+ int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+ /**
+ * struct ttm_backend_func member unbind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Unbind previously bound backend pages. This function should be
+ * able to handle differences between aperture and system page sizes.
+ */
+ int (*unbind) (struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_backend_func member destroy
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+	 * Destroy the backend. This is called back from ttm_tt_destroy, so don't
+	 * call ttm_tt_destroy from the callback or you will cause an infinite
+	 * loop.
+ */
+ void (*destroy) (struct ttm_tt *ttm);
+};
+
+/**
+ * struct ttm_tt
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
+ * @pages: Array of pages backing the data.
+ * @num_pages: Number of pages in the page array.
+ * @swap_storage: Pointer to shmem struct file for swap storage.
+ * @caching_state: The current caching state of the pages.
+ * @state: The current binding state of the pages.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_tt {
+ struct ttm_bo_device *bdev;
+ struct ttm_backend_func *func;
+ struct page **pages;
+ uint32_t page_flags;
+ unsigned long num_pages;
+ struct sg_table *sg; /* for SG objects via dma-buf */
+ struct file *swap_storage;
+ enum ttm_caching_state caching_state;
+ enum {
+ tt_bound,
+ tt_unbound,
+ tt_unpopulated,
+ } state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+ struct ttm_tt ttm;
+ dma_addr_t *dma_address;
+ struct list_head pages_list;
+};
+
+/**
+ * ttm_tt_create
+ *
+ * @bo: pointer to a struct ttm_buffer_object
+ * @zero_alloc: true if allocated pages need to be zeroed
+ *
+ * Make sure we have a TTM structure allocated for the given BO.
+ * No pages are actually allocated.
+ */
+int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
+
+/**
+ * ttm_tt_init
+ *
+ * @ttm: The struct ttm_tt.
+ * @bo: The buffer object we create the ttm for.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+void ttm_tt_fini(struct ttm_tt *ttm);
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
+
+/**
+ * ttm_ttm_bind:
+ *
+ * @ttm: The struct ttm_tt containing backing pages.
+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ *
+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
+ */
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
+ struct ttm_operation_ctx *ctx);
+
+/**
+ * ttm_ttm_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy common struct ttm_tt.
+ */
+void ttm_tt_destroy(struct ttm_tt *ttm);
+
+/**
+ * ttm_ttm_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+void ttm_tt_unbind(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_swapin:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Swap in a previously swapped-out ttm_tt.
+ */
+int ttm_tt_swapin(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm: A struct ttm_tt whose backing pages will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change caching policy of any default kernel mappings of
+ * the pages backing @ttm. If changing from cached to uncached or
+ * write-combined, all CPU caches will first be flushed to make sure the data
+ * of the pages hit RAM. This function may be very costly as it involves
+ * global TLB and cache flushes and potential page splitting / combining.
+ */
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
+
+/**
+ * ttm_tt_populate - allocate pages for a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to allocate pages for a ttm
+ */
+int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+void ttm_tt_unpopulate(struct ttm_tt *ttm);
+
+#if IS_ENABLED(CONFIG_AGP)
+#include <linux/agp_backend.h>
+
+/**
+ * ttm_agp_tt_create
+ *
+ * @bo: Buffer object we allocate the ttm for.
+ * @bridge: The agp bridge this device is sitting on.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
+ struct agp_bridge_data *bridge,
+ uint32_t page_flags);
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+#endif
+
+#endif
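A hedged sketch of a driver's ttm_tt_create hook against the new bo-based ttm_dma_tt_init() signature; example_backend_func stands in for the driver's real bind/unbind/destroy callbacks.

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical placeholder for the driver's bind/unbind/destroy callbacks. */
static struct ttm_backend_func example_backend_func;

static struct ttm_tt *example_ttm_tt_create(struct ttm_buffer_object *bo,
					    uint32_t page_flags)
{
	struct ttm_dma_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->ttm.func = &example_backend_func;

	if (ttm_dma_tt_init(tt, bo, page_flags)) {
		kfree(tt);
		return NULL;
	}

	return &tt->ttm;
}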
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index cdbd142ca7f2..02924ae2527e 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -360,6 +360,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu);
bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 968173ec2726..15bfb15c2fa5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -623,6 +623,13 @@ bool acpi_gtdt_c3stop(int type);
int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count);
#endif
+#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER
+static inline u64 acpi_arch_get_root_pointer(void)
+{
+ return 0;
+}
+#endif
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index af7003548593..2baab6f3861d 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -130,6 +130,48 @@ static inline int backlight_update_status(struct backlight_device *bd)
return ret;
}
+/**
+ * backlight_enable - Enable backlight
+ * @bd: the backlight device to enable
+ */
+static inline int backlight_enable(struct backlight_device *bd)
+{
+ if (!bd)
+ return 0;
+
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.fb_blank = FB_BLANK_UNBLANK;
+ bd->props.state &= ~BL_CORE_FBBLANK;
+
+ return backlight_update_status(bd);
+}
+
+/**
+ * backlight_disable - Disable backlight
+ * @bd: the backlight device to disable
+ */
+static inline int backlight_disable(struct backlight_device *bd)
+{
+ if (!bd)
+ return 0;
+
+ bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.fb_blank = FB_BLANK_POWERDOWN;
+ bd->props.state |= BL_CORE_FBBLANK;
+
+ return backlight_update_status(bd);
+}
+
+/**
+ * backlight_put - Drop backlight reference
+ * @bd: the backlight device to put
+ */
+static inline void backlight_put(struct backlight_device *bd)
+{
+ if (bd)
+ put_device(&bd->dev);
+}
+
extern struct backlight_device *backlight_device_register(const char *name,
struct device *dev, void *devdata, const struct backlight_ops *ops,
const struct backlight_properties *props);
@@ -173,4 +215,20 @@ of_find_backlight_by_node(struct device_node *node)
}
#endif
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+struct backlight_device *of_find_backlight(struct device *dev);
+struct backlight_device *devm_of_find_backlight(struct device *dev);
+#else
+static inline struct backlight_device *of_find_backlight(struct device *dev)
+{
+ return NULL;
+}
+
+static inline struct backlight_device *
+devm_of_find_backlight(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
#endif
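A hedged sketch of how a panel driver might use the new helpers: devm_of_find_backlight() during probe and backlight_enable()/backlight_disable() at runtime; the example_panel structure and function names are made up.

#include <linux/err.h>
#include <linux/backlight.h>

struct example_panel {
	struct backlight_device *backlight;
};

static int example_panel_probe(struct device *dev, struct example_panel *panel)
{
	panel->backlight = devm_of_find_backlight(dev);
	if (IS_ERR(panel->backlight))
		return PTR_ERR(panel->backlight);

	/* NULL here simply means no backlight was described for this device */
	return 0;
}

static void example_panel_set_power(struct example_panel *panel, bool on)
{
	/* both helpers accept a NULL backlight device and return 0 */
	if (on)
		backlight_enable(panel->backlight);
	else
		backlight_disable(panel->backlight);
}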
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 9f242b876fde..f8e76d01a5ad 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -755,13 +755,13 @@ struct sock_cgroup_data {
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
}
-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index e16d07eb08cf..16c3027074a2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -17,6 +17,7 @@
#include <linux/if.h>
#include <linux/fs.h>
#include <linux/aio_abi.h> /* for aio_context_t */
+#include <linux/uaccess.h>
#include <linux/unistd.h>
#include <asm/compat.h>
@@ -550,8 +551,29 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);
-extern int put_compat_sigset(compat_sigset_t __user *compat,
- const sigset_t *set, unsigned int size);
+
+/*
+ * Defined inline such that size can be compile time constant, which avoids
+ * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct
+ */
+static inline int
+put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
+ unsigned int size)
+{
+ /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
+#ifdef __BIG_ENDIAN
+ compat_sigset_t v;
+ switch (_NSIG_WORDS) {
+ case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+ case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+ case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+ case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
+ }
+ return copy_to_user(compat, &v, size) ? -EFAULT : 0;
+#else
+ return copy_to_user(compat, set, size) ? -EFAULT : 0;
+#endif
+}
asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 46e151172d95..6a86d8db16d9 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -106,6 +106,7 @@ extern void dmi_scan_machine(void);
extern void dmi_memdev_walk(void);
extern void dmi_set_dump_stack_arch_desc(void);
extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
+extern int dmi_get_bios_year(void);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
@@ -133,6 +134,7 @@ static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
*dayp = 0;
return false;
}
+static inline int dmi_get_bios_year(void) { return -ENXIO; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
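A small hedged sketch of the new dmi_get_bios_year() helper; the cut-off year is arbitrary.

#include <linux/dmi.h>

static bool example_bios_is_recent(void)
{
	/* dmi_get_bios_year() returns a negative errno when DMI is unusable */
	return dmi_get_bios_year() >= 2017;
}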
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 79c413985305..c6baf767619e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1317,6 +1317,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
+#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */
/* sb->s_iflags to limit user namespace mounts */
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index c332f0a45607..3fdfede2f0f3 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -734,11 +734,7 @@ struct fsl_ifc_nand {
u32 res19[0x10];
__be32 nand_fsr;
u32 res20;
- /* The V1 nand_eccstat is actually 4 words that overlaps the
- * V2 nand_eccstat.
- */
- __be32 v1_nand_eccstat[2];
- __be32 v2_nand_eccstat[6];
+ __be32 nand_eccstat[8];
u32 res21[0x1c];
__be32 nanndcr;
u32 res22[0x2];
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index cf045885a499..6058c3844a76 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -53,6 +53,9 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
/* FIXME: only change from the attr, and don't unregister */
extern int
modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
+extern int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+ bool check);
/*
* Kernel breakpoints are not associated with any particular thread.
@@ -97,6 +100,10 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
static inline int
modify_user_hw_breakpoint(struct perf_event *bp,
struct perf_event_attr *attr) { return -ENOSYS; }
+static inline int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+ bool check) { return -ENOSYS; }
+
static inline struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index b19563f9a8eb..fc08b433c856 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -8,15 +8,28 @@
*/
#ifdef CONFIG_X86
+
+#include <asm/jailhouse_para.h>
#include <asm/x86_init.h>
+
static inline void hypervisor_pin_vcpu(int cpu)
{
x86_platform.hyper.pin_vcpu(cpu);
}
-#else
+
+#else /* !CONFIG_X86 */
+
+#include <linux/of.h>
+
static inline void hypervisor_pin_vcpu(int cpu)
{
}
-#endif
+
+static inline bool jailhouse_paravirt(void)
+{
+ return of_find_compatible_node(NULL, NULL, "jailhouse,cell");
+}
+
+#endif /* !CONFIG_X86 */
#endif /* __LINUX_HYPEVISOR_H */
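A minimal hedged sketch of the new jailhouse_paravirt() check; the quirk being skipped is hypothetical.

#include <linux/hypervisor.h>

static bool example_may_reprogram_hardware(void)
{
	/* inside a Jailhouse cell, leave hypervisor-owned resources alone */
	return !jailhouse_paravirt();
}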
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index c5b0a75a7812..fd00170b494f 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -25,6 +25,7 @@ struct ptr_ring *tun_get_tx_ring(struct file *file);
bool tun_is_xdp_buff(void *ptr);
void *tun_xdp_to_ptr(void *ptr);
void *tun_ptr_to_xdp(void *ptr);
+void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -50,5 +51,8 @@ static inline void *tun_ptr_to_xdp(void *ptr)
{
return NULL;
}
+static inline void tun_ptr_free(void *ptr)
+{
+}
#endif /* CONFIG_TUN */
#endif /* __IF_TUN_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 5e6a2d4dc366..7d30892da064 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -300,32 +300,47 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
}
/**
- * __vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_inner_tag - inner VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
*
- * Inserts the VLAN tag into @skb as part of the payload
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
* Returns error if skb_cow_head failes.
*
* Does not change skb->protocol so this function can be used during receive.
*/
-static inline int __vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci,
+ unsigned int mac_len)
{
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, VLAN_HLEN) < 0)
return -ENOMEM;
- veth = skb_push(skb, VLAN_HLEN);
+ skb_push(skb, VLAN_HLEN);
- /* Move the mac addresses to the beginning of the new header. */
- memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
+ /* Move the mac header sans proto to the beginning of the new header. */
+ if (likely(mac_len > ETH_TLEN))
+ memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
skb->mac_header -= VLAN_HLEN;
+ veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
+
/* first, the ethernet type */
- veth->h_vlan_proto = vlan_proto;
+ if (likely(mac_len >= ETH_TLEN)) {
+ /* h_vlan_encapsulated_proto should already be populated, and
+ * skb->data has space for h_vlan_proto
+ */
+ veth->h_vlan_proto = vlan_proto;
+ } else {
+ /* h_vlan_encapsulated_proto should not be populated, and
+ * skb->data has no space for h_vlan_proto
+ */
+ veth->h_vlan_encapsulated_proto = skb->protocol;
+ }
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
@@ -334,12 +349,30 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
}
/**
- * vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
+ * Returns error if skb_cow_head fails.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline int __vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
+/**
+ * vlan_insert_inner_tag - inner VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
+ *
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
* Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
*
* Following the skb_unshare() example, in case of error, the calling function
@@ -347,12 +380,14 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
*
* Does not change skb->protocol so this function can be used during receive.
*/
-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
+ __be16 vlan_proto,
+ u16 vlan_tci,
+ unsigned int mac_len)
{
int err;
- err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
+ err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
if (err) {
dev_kfree_skb_any(skb);
return NULL;
@@ -361,6 +396,26 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
}
/**
+ * vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
+/**
* vlan_insert_tag_set_proto - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
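A hedged sketch of the new inner-tag variant, assuming skb->mac_len already covers any outer VLAN headers so that the new tag lands behind them; the helper name is illustrative.

#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

static struct sk_buff *example_push_inner_vlan(struct sk_buff *skb, u16 vid)
{
	/* on error the skb is freed and NULL returned, as with vlan_insert_tag() */
	return vlan_insert_inner_tag(skb, htons(ETH_P_8021Q), vid,
				     skb->mac_len);
}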
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 69c238210325..5426627f9c55 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -4,9 +4,7 @@
#define _LINUX_INTERRUPT_H
#include <linux/kernel.h>
-#include <linux/linkage.h>
#include <linux/bitops.h>
-#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c00c4c33e432..b26eccc78fb1 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -503,6 +503,7 @@
#define ICH_HCR_EN (1 << 0)
#define ICH_HCR_UIE (1 << 1)
+#define ICH_HCR_NPIE (1 << 3)
#define ICH_HCR_TC (1 << 10)
#define ICH_HCR_TALL0 (1 << 11)
#define ICH_HCR_TALL1 (1 << 12)
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index d3453ee072fc..68d8b1f73682 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -84,6 +84,7 @@
#define GICH_HCR_EN (1 << 0)
#define GICH_HCR_UIE (1 << 1)
+#define GICH_HCR_NPIE (1 << 3)
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 2168cc6b8b30..b46b541c67c4 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -151,7 +151,7 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
-extern void jump_label_invalidate_init(void);
+extern void jump_label_invalidate_initmem(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -199,7 +199,7 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true;
}
-static inline void jump_label_invalidate_init(void) {}
+static inline void jump_label_invalidate_initmem(void) {}
static __always_inline bool static_key_false(struct static_key *key)
{
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index adc13474a53b..d6459bd1376d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -18,7 +18,7 @@ extern unsigned char kasan_zero_page[PAGE_SIZE];
extern pte_t kasan_zero_pte[PTRS_PER_PTE];
extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
extern pud_t kasan_zero_pud[PTRS_PER_PUD];
-extern p4d_t kasan_zero_p4d[PTRS_PER_P4D];
+extern p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D];
void kasan_populate_zero_shadow(const void *shadow_start,
const void *shadow_end);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 8be5077efb5f..f92ea7783652 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -187,7 +187,6 @@ int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
unsigned long *out_end_pfn, int *out_nid);
-unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
/**
* for_each_mem_pfn_range - early memory pfn range iterator
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 6ed79a8a8318..9d3a03364e6e 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -453,8 +453,8 @@ struct mlx5_core_srq {
struct mlx5_core_rsc_common common; /* must be first */
u32 srqn;
int max;
- int max_gs;
- int max_avail_gather;
+ size_t max_gs;
+ size_t max_avail_gather;
int wqe_shift;
void (*event) (struct mlx5_core_srq *, enum mlx5_event);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7522a6987595..a2db4576e499 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -816,10 +816,6 @@ int local_memory_node(int node_id);
static inline int local_memory_node(int node_id) { return node_id; };
#endif
-#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
-unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
-#endif
-
/*
* zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
*/
@@ -1289,7 +1285,6 @@ struct mminit_pfnnid_cache {
#endif
void memory_present(int nid, unsigned long start, unsigned long end);
-unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
/*
* If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index cb3bbed4e633..14bc0d5d0ee5 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -14,7 +14,6 @@
#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
-#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
diff --git a/include/linux/net.h b/include/linux/net.h
index 91216b16feb7..2a0391eea05c 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -222,6 +222,7 @@ enum {
int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
int sock_register(const struct net_proto_family *fam);
void sock_unregister(int family);
+bool sock_is_registered(int family);
int __sock_create(struct net *net, int family, int type, int proto,
struct socket **res, int kern);
int sock_create(int family, int type, int proto, struct socket **res);
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index bebeaad897cc..29ed8fd6379a 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -231,7 +231,7 @@ static inline void net_dim_exit_parking(struct net_dim *dim)
}
#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
+ (((100UL * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
static inline int net_dim_stats_compare(struct net_dim_stats *curr,
struct net_dim_stats *prev)
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 1313b35c3ab7..14529511c4b8 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -285,6 +285,8 @@ unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
unsigned int target, unsigned int size);
+int xt_check_proc_name(const char *name, unsigned int size);
+
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
diff --git a/include/linux/of.h b/include/linux/of.h
index da1ee95241c1..ebf22dd0860c 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1359,8 +1359,8 @@ struct of_overlay_notify_data {
#ifdef CONFIG_OF_OVERLAY
-/* ID based overlays; the API for external users */
-int of_overlay_apply(struct device_node *tree, int *ovcs_id);
+int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id);
int of_overlay_remove(int *ovcs_id);
int of_overlay_remove_all(void);
@@ -1369,7 +1369,7 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_apply(struct device_node *tree, int *ovcs_id)
+static inline int of_overlay_fdt_apply(void *overlay_fdt, int *ovcs_id)
{
return -ENOTSUPP;
}
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 88865e0ebf4d..091033a6b836 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -13,7 +13,6 @@ struct device_node;
struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn);
int of_pci_get_devfn(struct device_node *np);
-int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
@@ -34,12 +33,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
}
static inline int
-of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- return 0;
-}
-
-static inline int
of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
return -EINVAL;
@@ -67,6 +60,16 @@ of_pci_get_max_link_speed(struct device_node *node)
static inline void of_pci_check_probe_only(void) { }
#endif
+#if IS_ENABLED(CONFIG_OF_IRQ)
+int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
+#else
+static inline int
+of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return 0;
+}
+#endif
+
#if defined(CONFIG_OF_ADDRESS)
int of_pci_get_host_bridge_resources(struct device_node *dev,
unsigned char busno, unsigned char bus_max,
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 024a1beda008..ae42289662df 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1147,6 +1147,8 @@ void pci_pme_wakeup_bus(struct pci_bus *bus);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
+void pci_wakeup_bus(struct pci_bus *bus);
+void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a6b30667a331..a637a7d8ce5b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -45,6 +45,7 @@
#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402
+#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
#define PCI_BASE_CLASS_MEMORY 0x05
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 864d167a1073..009cdf3d65b6 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -30,10 +30,14 @@
* calls io_destroy() or the process exits.
*
* In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
- * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
- * the kioctx from the proccess's list of kioctxs - after that, there can't be
- * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
- * the initial ref with percpu_ref_put().
+ * removes the kioctx from the process's table of kioctxs and kills percpu_ref.
+ * After that, there can't be any new users of the kioctx (from lookup_ioctx())
+ * and it's then safe to drop the initial ref with percpu_ref_put().
+ *
+ * Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
+ * to synchronize with RCU protected lookup_ioctx(). percpu_ref operations don't
+ * imply RCU grace periods of any kind and if a user wants to combine percpu_ref
+ * with RCU protection, it must be done explicitly.
*
* Code that does a two stage shutdown like this often needs some kind of
* explicit synchronization to ensure the initial refcount can only be dropped
@@ -113,8 +117,10 @@ void percpu_ref_reinit(struct percpu_ref *ref);
* Must be used to drop the initial ref on a percpu refcount; must be called
* precisely once before shutdown.
*
- * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
- * percpu counters and dropping the initial ref.
+ * Switches @ref into atomic mode before gathering up the percpu counters
+ * and dropping the initial ref.
+ *
+ * There are no implied RCU grace periods between kill and release.
*/
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
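The rewritten percpu_ref_kill() comments stress that there is no implied RCU grace period between kill and release, so code that pairs percpu_ref with RCU-protected lookup must add the grace period itself. A minimal sketch of that pattern with hypothetical names (my_obj, my_obj_release); it assumes the usual <linux/percpu-refcount.h>, <linux/rcupdate.h> and <linux/slab.h> interfaces:

struct my_obj {
	struct percpu_ref ref;
	struct rcu_head rcu;
};

static void my_obj_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_obj, rcu));
}

/* Release callback passed to percpu_ref_init(). */
static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	/* Lookup side runs under rcu_read_lock(), so defer the free. */
	call_rcu(&obj->rcu, my_obj_free_rcu);
}

static void my_obj_destroy(struct my_obj *obj)
{
	/* Unpublish from the lookup structure first, then ... */
	percpu_ref_kill(&obj->ref);	/* drops the initial ref, no RCU implied */
}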
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 7546822a1d74..e71e99eb9a4e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -449,14 +449,19 @@ struct pmu {
int (*filter_match) (struct perf_event *event); /* optional */
};
+enum perf_addr_filter_action_t {
+ PERF_ADDR_FILTER_ACTION_STOP = 0,
+ PERF_ADDR_FILTER_ACTION_START,
+ PERF_ADDR_FILTER_ACTION_FILTER,
+};
+
/**
* struct perf_addr_filter - address range filter definition
* @entry: event's filter list linkage
* @inode: object file's inode for file-based filters
* @offset: filter range offset
- * @size: filter range size
- * @range: 1: range, 0: address
- * @filter: 1: filter/start, 0: stop
+ * @size: filter range size (size==0 means single address trigger)
+ * @action: filter/start/stop
*
* This is a hardware-agnostic filter configuration as specified by the user.
*/
@@ -465,8 +470,7 @@ struct perf_addr_filter {
struct inode *inode;
unsigned long offset;
unsigned long size;
- unsigned int range : 1,
- filter : 1;
+ enum perf_addr_filter_action_t action;
};
/**
@@ -536,6 +540,10 @@ struct pmu_event_list {
struct list_head list;
};
+#define for_each_sibling_event(sibling, event) \
+ if ((event)->group_leader == (event)) \
+ list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
+
/**
* struct perf_event - performance event kernel representation:
*/
@@ -549,16 +557,16 @@ struct perf_event {
struct list_head event_entry;
/*
- * XXX: group_entry and sibling_list should be mutually exclusive;
- * either you're a sibling on a group, or you're the group leader.
- * Rework the code to always use the same list element.
- *
* Locked for modification by both ctx->mutex and ctx->lock; holding
* either sufficies for read.
*/
- struct list_head group_entry;
struct list_head sibling_list;
-
+ struct list_head active_list;
+ /*
+ * Node on the pinned or flexible tree located at the event context;
+ */
+ struct rb_node group_node;
+ u64 group_index;
/*
* We need storage to track the entries in perf_pmu_migrate_context; we
* cannot use the event_entry because of RCU and we want to keep the
@@ -690,6 +698,12 @@ struct perf_event {
#endif /* CONFIG_PERF_EVENTS */
};
+
+struct perf_event_groups {
+ struct rb_root tree;
+ u64 index;
+};
+
/**
* struct perf_event_context - event context structure
*
@@ -710,9 +724,13 @@ struct perf_event_context {
struct mutex mutex;
struct list_head active_ctx_list;
- struct list_head pinned_groups;
- struct list_head flexible_groups;
+ struct perf_event_groups pinned_groups;
+ struct perf_event_groups flexible_groups;
struct list_head event_list;
+
+ struct list_head pinned_active;
+ struct list_head flexible_active;
+
int nr_events;
int nr_active;
int is_active;
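The new for_each_sibling_event() iterator only runs its body when handed the group leader, mirroring the sibling_list layout described in the struct comment. A hedged sketch, assuming <linux/perf_event.h>; group_size() is a hypothetical helper, not a kernel function:

/* Count the members of an event group with the new iterator. */
static int group_size(struct perf_event *leader)
{
	struct perf_event *sibling;
	int n = 1;	/* the leader itself */

	for_each_sibling_event(sibling, leader)
		n++;	/* never runs if 'leader' is itself a sibling */

	return n;
}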
diff --git a/include/linux/phy.h b/include/linux/phy.h
index d7069539f351..7c4c2379e010 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -984,6 +984,10 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
{
return 0;
}
+int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
+ u16 regnum);
+int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
+ u16 regnum, u16 val);
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
@@ -1012,7 +1016,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_state_machine(struct work_struct *work);
-void phy_change(struct phy_device *phydev);
void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 043d04784675..36360d07f25b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -214,10 +214,12 @@ do { \
#endif
/*
- * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
- * initialization and destruction of rcu_head on the stack. rcu_head structures
- * allocated dynamically in the heap or defined statically don't need any
- * initialization.
+ * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls
+ * are needed for dynamic initialization and destruction of rcu_head
+ * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for
+ * dynamic initialization and destruction of statically allocated rcu_head
+ * structures. However, rcu_head structures allocated dynamically in the
+ * heap don't need any initialization.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index c9df2527e0cd..668a21f04b09 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -766,8 +766,10 @@ slow_path:
if (!key ||
(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }
data = rht_obj(ht, head);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b161ef8a902e..f228c6033832 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -93,7 +93,6 @@ struct task_group;
/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
-#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
/* get_task_state(): */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
@@ -275,6 +274,34 @@ struct load_weight {
u32 inv_weight;
};
+/**
+ * struct util_est - Estimation utilization of FAIR tasks
+ * @enqueued: instantaneous estimated utilization of a task/cpu
+ * @ewma: the Exponential Weighted Moving Average (EWMA)
+ * utilization of a task
+ *
+ * Support data structure to track an Exponential Weighted Moving Average
+ * (EWMA) of a FAIR task's utilization. New samples are added to the moving
+ * average each time a task completes an activation. Sample's weight is chosen
+ * so that the EWMA will be relatively insensitive to transient changes to the
+ * task's workload.
+ *
+ * The enqueued attribute has a slightly different meaning for tasks and cpus:
+ * - task: the task's util_avg at last task dequeue time
+ * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
+ * Thus, the util_est.enqueued of a task represents the contribution on the
+ * estimated utilization of the CPU where that task is currently enqueued.
+ *
+ * Only for tasks we track a moving average of the past instantaneous
+ * estimated utilization. This allows to absorb sporadic drops in utilization
+ * of an otherwise almost periodic task.
+ */
+struct util_est {
+ unsigned int enqueued;
+ unsigned int ewma;
+#define UTIL_EST_WEIGHT_SHIFT 2
+};
+
/*
* The load_avg/util_avg accumulates an infinite geometric series
* (see __update_load_avg() in kernel/sched/fair.c).
@@ -336,6 +363,7 @@ struct sched_avg {
unsigned long load_avg;
unsigned long runnable_load_avg;
unsigned long util_avg;
+ struct util_est util_est;
};
struct sched_statistics {
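The util_est comment describes an EWMA whose sample weight is 1/(1 << UTIL_EST_WEIGHT_SHIFT), i.e. 1/4. A back-of-the-envelope sketch of one such update step, illustrative only and not the scheduler's actual code:

/* new_ewma = 3/4 * ewma + 1/4 * sample, using only shifts and adds. */
static unsigned int ewma_step(unsigned int ewma, unsigned int sample)
{
	long acc = ((long)ewma << UTIL_EST_WEIGHT_SHIFT)
		 + ((long)sample - (long)ewma);

	return (unsigned int)(acc >> UTIL_EST_WEIGHT_SHIFT);
}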
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index 0b55834efd46..59667444669f 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -8,9 +8,8 @@
* Interface between cpufreq drivers and the scheduler:
*/
-#define SCHED_CPUFREQ_RT (1U << 0)
-#define SCHED_CPUFREQ_DL (1U << 1)
-#define SCHED_CPUFREQ_IOWAIT (1U << 2)
+#define SCHED_CPUFREQ_IOWAIT (1U << 0)
+#define SCHED_CPUFREQ_MIGRATION (1U << 1)
#ifdef CONFIG_CPU_FREQ
struct update_util_data {
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 094217273ff9..b36f4cf38111 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -16,11 +16,9 @@ static inline void cpu_load_update_nohz_stop(void) { }
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
-extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
-static inline void set_cpu_sd_state_idle(void) { }
#endif
#ifdef CONFIG_NO_HZ_COMMON
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ddf77cf4ff2d..99df17109e1b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4037,6 +4037,12 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
+/* Note: Should be called only if skb_is_gso(skb) is true */
+static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
+}
+
static inline void skb_gso_reset(struct sk_buff *skb)
{
skb_shinfo(skb)->gso_size = 0;
@@ -4044,6 +4050,22 @@ static inline void skb_gso_reset(struct sk_buff *skb)
skb_shinfo(skb)->gso_type = 0;
}
+static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
+ u16 increment)
+{
+ if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
+ return;
+ shinfo->gso_size += increment;
+}
+
+static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
+ u16 decrement)
+{
+ if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
+ return;
+ shinfo->gso_size -= decrement;
+}
+
void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
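skb_is_gso_sctp() is only meaningful after an skb_is_gso() check, and the new gso_size helpers deliberately refuse to touch GSO_BY_FRAGS skbs. A hedged sketch of a caller honoring both rules; shrink_gso_for_encap() and the overhead value are hypothetical, and <linux/skbuff.h> is assumed:

static void shrink_gso_for_encap(struct sk_buff *skb, u16 overhead)
{
	/* SCTP GSO is not sized like TCP GSO; leave its gso_size alone. */
	if (!skb_is_gso(skb) || skb_is_gso_sctp(skb))
		return;

	/* WARNs and bails out internally if gso_size == GSO_BY_FRAGS. */
	skb_decrease_gso_size(skb_shinfo(skb), overhead);
}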
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 8a1442c4e513..c20acfcf647d 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -540,6 +540,14 @@ extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
+#ifdef CONFIG_KPROBE_EVENTS
+extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe);
+extern void perf_kprobe_destroy(struct perf_event *event);
+#endif
+#ifdef CONFIG_UPROBE_EVENTS
+extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe);
+extern void perf_uprobe_destroy(struct perf_event *event);
+#endif
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 0a6c71e0ad01..47f8af22f216 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -364,6 +364,7 @@ struct tty_file_private {
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
#define TTY_HUPPED 18 /* Post driver->hangup() */
+#define TTY_HUPPING 19 /* Hangup in progress */
#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
/* Values for tty->flow_change */
diff --git a/include/linux/types.h b/include/linux/types.h
index c94d59ef96cc..ec13d02b3481 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -217,7 +217,7 @@ struct ustat {
*
* This guarantee is important for few reasons:
* - future call_rcu_lazy() will make use of lower bits in the pointer;
- * - the structure shares storage spacer in struct page with @compound_head,
+ * - the structure shares storage space in struct page with @compound_head,
* which encode PageTail() in bit 0. The guarantee is needed to avoid
* false-positive PageTail().
*/
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 5bdbd9f49395..07ee0f84a46c 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -90,6 +90,28 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
#endif
}
+static inline unsigned long
+u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+{
+ unsigned long flags = 0;
+
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ local_irq_save(flags);
+ write_seqcount_begin(&syncp->seq);
+#endif
+ return flags;
+}
+
+static inline void
+u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
+ unsigned long flags)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ write_seqcount_end(&syncp->seq);
+ local_irq_restore(flags);
+#endif
+}
+
static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
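The new *_irqsave/_irqrestore variants let a writer that may run with interrupts enabled update 64-bit stats safely on 32-bit SMP, where the seqcount is actually taken. A minimal sketch with a hypothetical stats structure; assumes <linux/u64_stats_sync.h>:

struct my_pcpu_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void my_stats_add(struct my_pcpu_stats *stats, unsigned int len)
{
	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	stats->packets++;
	stats->bytes += len;
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}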
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index f1fcec2fd5f8..b7a99ce56bc9 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -63,4 +63,7 @@
*/
#define USB_QUIRK_DISCONNECT_SUSPEND BIT(12)
+/* Device needs a pause after every control message. */
+#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 960bedbdec87..77f0f0af3a71 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -168,11 +168,8 @@ int vga_switcheroo_process_delayed_switch(void);
bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev);
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
-
int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
#else
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
@@ -192,11 +189,8 @@ static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
-static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
-
static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
#endif
#endif /* _LINUX_VGA_SWITCHEROO_H_ */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index bc0cda180c8b..0c3301421c57 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -456,7 +456,6 @@ extern int schedule_on_each_cpu(work_func_t func);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern bool flush_work(struct work_struct *work);
-extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
diff --git a/include/net/ip.h b/include/net/ip.h
index 746abff9ce51..f49b3a576bec 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -328,6 +328,13 @@ int ip_decrease_ttl(struct iphdr *iph)
return --iph->ttl;
}
+static inline int ip_mtu_locked(const struct dst_entry *dst)
+{
+ const struct rtable *rt = (const struct rtable *)dst;
+
+ return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
+}
+
static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
@@ -335,7 +342,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
return pmtudisc == IP_PMTUDISC_DO ||
(pmtudisc == IP_PMTUDISC_WANT &&
- !(dst_metric_locked(dst, RTAX_MTU)));
+ !ip_mtu_locked(dst));
}
static inline bool ip_sk_accept_pmtu(const struct sock *sk)
@@ -361,7 +368,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
struct net *net = dev_net(dst->dev);
if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
- dst_metric_locked(dst, RTAX_MTU) ||
+ ip_mtu_locked(dst) ||
!forwarding)
return dst_mtu(dst);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 27d23a65f3cd..ac0866bb9e93 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -179,6 +179,9 @@ void rt6_disable_ip(struct net_device *dev, unsigned long event);
void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
void rt6_multipath_rebalance(struct rt6_info *rt);
+void rt6_uncached_list_add(struct rt6_info *rt);
+void rt6_uncached_list_del(struct rt6_info *rt);
+
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
const struct dst_entry *dst = skb_dst(skb);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index f80524396c06..77d0a78cf7d2 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -59,6 +59,7 @@ struct fib_nh_exception {
int fnhe_genid;
__be32 fnhe_daddr;
u32 fnhe_pmtu;
+ bool fnhe_mtu_locked;
__be32 fnhe_gw;
unsigned long fnhe_expires;
struct rtable __rcu *fnhe_rth_input;
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index fe994d2e5286..5c40f118c0fa 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -103,7 +103,7 @@ void llc_sk_reset(struct sock *sk);
/* Access to a connection */
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index c96511fa9198..2b581bd93812 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2063,6 +2063,9 @@ struct ieee80211_txq {
* @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
* TDLS links.
*
+ * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
+ * support QoS NDP for AP probing - that's most likely a driver bug.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2106,6 +2109,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_REPORTS_LOW_ACK,
IEEE80211_HW_SUPPORTS_TX_FRAG,
IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
+ IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 663b015dace5..30eb0652b025 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1068,6 +1068,8 @@ struct nft_object_ops {
int nft_register_obj(struct nft_object_type *obj_type);
void nft_unregister_obj(struct nft_object_type *obj_type);
+#define NFT_FLOWTABLE_DEVICE_MAX 8
+
/**
* struct nft_flowtable - nf_tables flow table
*
@@ -1080,6 +1082,7 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
* @genmask: generation mask
* @use: number of references to this flow table
* @handle: unique object handle
+ * @dev_name: array of device names
* @data: rhashtable and garbage collector
* @ops: array of hooks
*/
@@ -1093,6 +1096,7 @@ struct nft_flowtable {
u32 genmask:2,
use:30;
u64 handle;
+ char *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
/* runtime data below here */
struct nf_hook_ops *ops ____cacheline_aligned;
struct nf_flowtable data;
diff --git a/include/net/route.h b/include/net/route.h
index 1eb9ce470e25..20a92ca9e115 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -63,7 +63,8 @@ struct rtable {
__be32 rt_gateway;
/* Miscellaneous cached information */
- u32 rt_pmtu;
+ u32 rt_mtu_locked:1,
+ rt_pmtu:31;
u32 rt_table_id;
@@ -227,6 +228,9 @@ struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
+void rt_add_uncached_list(struct rtable *rt);
+void rt_del_uncached_list(struct rtable *rt);
+
static inline void ip_rt_put(struct rtable *rt)
{
/* dst_release() accepts a NULL parameter.
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e2ab13687fb9..8da32678ce18 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -30,6 +30,7 @@ struct qdisc_rate_table {
enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
+ __QDISC_STATE_RUNNING,
};
struct qdisc_size_table {
@@ -824,6 +825,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
*to_free = skb;
}
+static inline void __qdisc_drop_all(struct sk_buff *skb,
+ struct sk_buff **to_free)
+{
+ if (skb->prev)
+ skb->prev->next = *to_free;
+ else
+ skb->next = *to_free;
+ *to_free = skb;
+}
+
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct qdisc_skb_head *qh,
struct sk_buff **to_free)
@@ -956,6 +967,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_DROP;
}
+static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop_all(skb, to_free);
+ qdisc_qstats_drop(sch);
+
+ return NET_XMIT_DROP;
+}
+
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
long it will take to send a packet given its size.
*/
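__qdisc_drop_all() splices an entire chain of linked skbs (for example, a segmented burst) onto the to_free list instead of just the head skb. A hedged sketch of an enqueue path using the wrapper; toy_enqueue() is hypothetical, error handling is elided and <net/sch_generic.h> is assumed:

static int toy_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	/* Over limit: drop the whole chain, not just the first segment. */
	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop_all(skb, sch, to_free);

	return qdisc_enqueue_tail(skb, sch);
}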
diff --git a/include/net/sock.h b/include/net/sock.h
index 169c92afcafa..ae23f3b389ca 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1137,6 +1137,7 @@ struct proto {
int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
+int sock_load_diag_module(int family, int protocol);
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index d656809f1217..415e09960017 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -130,6 +130,8 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
const unsigned char *dst_dev_addr);
int rdma_addr_size(struct sockaddr *addr);
+int rdma_addr_size_in6(struct sockaddr_in6 *addr);
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 73b2387e3f74..ff3ed435701f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1537,10 +1537,6 @@ struct ib_xrcd {
struct mutex tgt_qp_mutex;
struct list_head tgt_qp_list;
- /*
- * Implementation details of the RDMA core, don't use in drivers:
- */
- struct rdma_restrack_entry res;
};
struct ib_ah {
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a8b7bf879ced..9c1e4bad6581 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -452,6 +452,9 @@ struct scsi_host_template {
/* True if the controller does not support WRITE SAME */
unsigned no_write_same:1;
+ /* True if the low-level driver supports blk-mq only */
+ unsigned force_blk_mq:1;
+
/*
* Countdown for host blocking with no commands outstanding.
*/
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 68169e3749de..5b2ed12f58ce 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -227,9 +227,6 @@ struct hdac_io_ops {
#define HDA_UNSOL_QUEUE_SIZE 64
#define HDA_MAX_CODECS 8 /* limit by controller side */
-/* HD Audio class code */
-#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
-
/*
* CORB/RIRB
*
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
index 200f731be557..7b706ff21335 100644
--- a/include/trace/events/mmc.h
+++ b/include/trace/events/mmc.h
@@ -86,8 +86,8 @@ TRACE_EVENT(mmc_request_start,
__entry->stop_flags, __entry->stop_retries,
__entry->sbc_opcode, __entry->sbc_arg,
__entry->sbc_flags, __entry->sbc_retries,
- __entry->blocks, __entry->blk_addr,
- __entry->blksz, __entry->data_flags, __entry->tag,
+ __entry->blocks, __entry->blksz,
+ __entry->blk_addr, __entry->data_flags, __entry->tag,
__entry->can_retune, __entry->doing_retune,
__entry->retune_now, __entry->need_retune,
__entry->hold_retune, __entry->retune_period)
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 0b50fda80db0..d8c33298c153 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -179,6 +179,10 @@ TRACE_EVENT(rcu_grace_period_init,
*
* "snap": Captured snapshot of expedited grace period sequence number.
* "start": Started a real expedited grace period.
+ * "reset": Started resetting the tree
+ * "select": Started selecting the CPUs to wait on.
+ * "selectofl": Selected CPU partially offline.
+ * "startwait": Started waiting on selected CPUs.
* "end": Ended a real expedited grace period.
* "endwake": Woke piggybackers up.
* "done": Someone else did the expedited grace period for us.
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4d21191aaed0..c363b67f2d0a 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -618,6 +618,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_SOS 0x0c
/* Subquery id: Query PSP ASD firmware version */
#define AMDGPU_INFO_FW_ASD 0x0d
+ /* Subquery id: Query VCN firmware version */
+ #define AMDGPU_INFO_FW_VCN 0x0e
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
@@ -664,6 +666,10 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_SENSOR_VDDNB 0x6
/* Subquery id: Query graphics voltage */
#define AMDGPU_INFO_SENSOR_VDDGFX 0x7
+ /* Subquery id: Query GPU stable pstate shader clock */
+ #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
+ /* Subquery id: Query GPU stable pstate memory clock */
+ #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
@@ -802,6 +808,7 @@ struct drm_amdgpu_info_firmware {
#define AMDGPU_VRAM_TYPE_GDDR5 5
#define AMDGPU_VRAM_TYPE_HBM 6
#define AMDGPU_VRAM_TYPE_DDR3 7
+#define AMDGPU_VRAM_TYPE_DDR4 8
struct drm_amdgpu_info_device {
/** PCI Device ID */
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 5597a87154e5..50bcf4214ff9 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -38,14 +38,18 @@ extern "C" {
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
-#define DRM_MODE_TYPE_BUILTIN (1<<0)
-#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
-#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_BUILTIN (1<<0) /* deprecated */
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
#define DRM_MODE_TYPE_PREFERRED (1<<3)
-#define DRM_MODE_TYPE_DEFAULT (1<<4)
+#define DRM_MODE_TYPE_DEFAULT (1<<4) /* deprecated */
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
+#define DRM_MODE_TYPE_ALL (DRM_MODE_TYPE_PREFERRED | \
+ DRM_MODE_TYPE_USERDEF | \
+ DRM_MODE_TYPE_DRIVER)
+
/* Video mode flags */
/* bit compatible with the xrandr RR_ definitions (bits 0-13)
*
@@ -66,8 +70,8 @@ extern "C" {
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
-#define DRM_MODE_FLAG_BCAST (1<<10)
-#define DRM_MODE_FLAG_PIXMUX (1<<11)
+#define DRM_MODE_FLAG_BCAST (1<<10) /* deprecated */
+#define DRM_MODE_FLAG_PIXMUX (1<<11) /* deprecated */
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/*
@@ -99,6 +103,20 @@ extern "C" {
#define DRM_MODE_FLAG_PIC_AR_16_9 \
(DRM_MODE_PICTURE_ASPECT_16_9<<19)
+#define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \
+ DRM_MODE_FLAG_NHSYNC | \
+ DRM_MODE_FLAG_PVSYNC | \
+ DRM_MODE_FLAG_NVSYNC | \
+ DRM_MODE_FLAG_INTERLACE | \
+ DRM_MODE_FLAG_DBLSCAN | \
+ DRM_MODE_FLAG_CSYNC | \
+ DRM_MODE_FLAG_PCSYNC | \
+ DRM_MODE_FLAG_NCSYNC | \
+ DRM_MODE_FLAG_HSKEW | \
+ DRM_MODE_FLAG_DBLCLK | \
+ DRM_MODE_FLAG_CLKDIV2 | \
+ DRM_MODE_FLAG_3D_MASK)
+
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
@@ -173,6 +191,10 @@ extern "C" {
DRM_MODE_REFLECT_X | \
DRM_MODE_REFLECT_Y)
+/* Content Protection Flags */
+#define DRM_MODE_CONTENT_PROTECTION_UNDESIRED 0
+#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
+#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
struct drm_mode_modeinfo {
__u32 clock;
@@ -341,7 +363,7 @@ struct drm_mode_get_connector {
__u32 pad;
};
-#define DRM_MODE_PROP_PENDING (1<<0)
+#define DRM_MODE_PROP_PENDING (1<<0) /* deprecated, do not use */
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
@@ -576,8 +598,11 @@ struct drm_mode_crtc_lut {
};
struct drm_color_ctm {
- /* Conversion matrix in S31.32 format. */
- __s64 matrix[9];
+ /*
+ * Conversion matrix in S31.32 sign-magnitude
+ * (not two's complement!) format.
+ */
+ __u64 matrix[9];
};
struct drm_color_lut {
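The clarified comment is easy to misread: drm_color_ctm entries are S31.32 fixed point stored as sign-magnitude, not two's complement. A hedged userspace helper for packing a double into that layout; ctm_s31_32() is not part of any kernel or libdrm API:

#include <stdint.h>

static uint64_t ctm_s31_32(double v)
{
	uint64_t sign = 0;

	if (v < 0.0) {
		sign = 1ULL << 63;	/* sign bit; magnitude stays positive */
		v = -v;
	}
	return sign | (uint64_t)(v * 4294967296.0);	/* magnitude * 2^32 */
}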
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index e9b997a0ef27..0d5c49dc478c 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -55,6 +55,12 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
+#define ETNAVIV_PARAM_GPU_FEATURES_7 0x0a
+#define ETNAVIV_PARAM_GPU_FEATURES_8 0x0b
+#define ETNAVIV_PARAM_GPU_FEATURES_9 0x0c
+#define ETNAVIV_PARAM_GPU_FEATURES_10 0x0d
+#define ETNAVIV_PARAM_GPU_FEATURES_11 0x0e
+#define ETNAVIV_PARAM_GPU_FEATURES_12 0x0f
#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 536ee4febd74..7f5634ce8e88 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -318,6 +318,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_OPEN 0x36
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
+#define DRM_I915_QUERY 0x39
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -375,6 +376,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
+#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -1358,7 +1360,9 @@ struct drm_intel_overlay_attrs {
* active on a given plane.
*/
-#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
+ * flags==0 to disable colorkeying.
+ */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
@@ -1604,15 +1608,115 @@ struct drm_i915_perf_oa_config {
__u32 n_flex_regs;
/*
- * These fields are pointers to tuples of u32 values (register
- * address, value). For example the expected length of the buffer
- * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ * These fields are pointers to tuples of u32 values (register address,
+ * value). For example the expected length of the buffer pointed by
+ * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
*/
__u64 mux_regs_ptr;
__u64 boolean_regs_ptr;
__u64 flex_regs_ptr;
};
+struct drm_i915_query_item {
+ __u64 query_id;
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+
+ /*
+ * When set to zero by userspace, this is filled with the size of the
+ * data to be written at the data_ptr pointer. The kernel sets this
+ * value to a negative value to signal an error on a particular query
+ * item.
+ */
+ __s32 length;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * Data will be written at the location pointed by data_ptr when the
+ * value of length matches the length of the data to be written by the
+ * kernel.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_query {
+ __u32 num_items;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * This points to an array of num_items drm_i915_query_item structures.
+ */
+ __u64 items_ptr;
+};
+
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
+ *
+ * data: contains the 3 pieces of information :
+ *
+ * - the slice mask with one bit per slice telling whether a slice is
+ * available. The availability of slice X can be queried with the following
+ * formula :
+ *
+ * (data[X / 8] >> (X % 8)) & 1
+ *
+ * - the subslice mask for each slice with one bit per subslice telling
+ * whether a subslice is available. The availability of subslice Y in slice
+ * X can be queried with the following formula :
+ *
+ * (data[subslice_offset +
+ * X * subslice_stride +
+ * Y / 8] >> (Y % 8)) & 1
+ *
+ * - the EU mask for each subslice in each slice with one bit per EU telling
+ * whether an EU is available. The availability of EU Z in subslice Y in
+ * slice X can be queried with the following formula :
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8] >> (Z % 8)) & 1
+ */
+struct drm_i915_query_topology_info {
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u16 flags;
+
+ __u16 max_slices;
+ __u16 max_subslices;
+ __u16 max_eus_per_subslice;
+
+ /*
+ * Offset in data[] at which the subslice masks are stored.
+ */
+ __u16 subslice_offset;
+
+ /*
+ * Stride at which each of the subslice masks for each slice are
+ * stored.
+ */
+ __u16 subslice_stride;
+
+ /*
+ * Offset in data[] at which the EU masks are stored.
+ */
+ __u16 eu_offset;
+
+ /*
+ * Stride at which each of the EU masks for each subslice are stored.
+ */
+ __u16 eu_stride;
+
+ __u8 data[];
+};
+
#if defined(__cplusplus)
}
#endif
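The query uAPI is meant to be used in two passes: a first DRM_IOCTL_I915_QUERY call with length == 0 makes the kernel report the required size, and a second call with an allocated buffer fills it. A hedged userspace sketch for DRM_I915_QUERY_TOPOLOGY_INFO with error handling elided; the include path and the open DRM fd are assumptions:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* or libdrm's copy of this header */

static struct drm_i915_query_topology_info *query_topology(int fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,	/* .length starts at 0 */
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_topology_info *info;

	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);		/* pass 1: size probe */
	if (item.length <= 0)
		return NULL;

	info = calloc(1, item.length);
	item.data_ptr = (uintptr_t)info;
	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);		/* pass 2: fill buffer */

	/* Slice X available?  ((info->data[X / 8] >> (X % 8)) & 1), per the comment. */
	return info;
}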
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index bbbaffad772d..c06d0a5bdd80 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -201,10 +201,12 @@ struct drm_msm_gem_submit_bo {
#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
+#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
#define MSM_SUBMIT_FLAGS ( \
MSM_SUBMIT_NO_IMPLICIT | \
MSM_SUBMIT_FENCE_FD_IN | \
MSM_SUBMIT_FENCE_FD_OUT | \
+ MSM_SUBMIT_SUDO | \
0)
/* Each cmdstream submit consists of a table of buffers involved, and
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index 52263b575bdc..b95a0e11cb07 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -42,6 +42,9 @@ extern "C" {
#define DRM_VC4_GET_TILING 0x09
#define DRM_VC4_LABEL_BO 0x0a
#define DRM_VC4_GEM_MADVISE 0x0b
+#define DRM_VC4_PERFMON_CREATE 0x0c
+#define DRM_VC4_PERFMON_DESTROY 0x0d
+#define DRM_VC4_PERFMON_GET_VALUES 0x0e
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -55,6 +58,9 @@ extern "C" {
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
+#define DRM_IOCTL_VC4_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
+#define DRM_IOCTL_VC4_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
+#define DRM_IOCTL_VC4_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)
struct drm_vc4_submit_rcl_surface {
__u32 hindex; /* Handle index, or ~0 if not present. */
@@ -173,6 +179,15 @@ struct drm_vc4_submit_cl {
* wait ioctl).
*/
__u64 seqno;
+
+ /* ID of the perfmon to attach to this job. 0 means no perfmon. */
+ __u32 perfmonid;
+
+ /* Unused field to align this struct on 64 bits. Must be set to 0.
+ * If one ever needs to add an u32 field to this struct, this field
+ * can be used.
+ */
+ __u32 pad2;
};
/**
@@ -308,6 +323,7 @@ struct drm_vc4_get_hang_state {
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
+#define DRM_VC4_PARAM_SUPPORTS_PERFMON 8
struct drm_vc4_get_param {
__u32 param;
@@ -352,6 +368,66 @@ struct drm_vc4_gem_madvise {
__u32 pad;
};
+enum {
+ VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
+ VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
+ VC4_PERFCNT_FEP_CLIPPED_QUADS,
+ VC4_PERFCNT_FEP_VALID_QUADS,
+ VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
+ VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
+ VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
+ VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
+ VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
+ VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
+ VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
+ VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
+ VC4_PERFCNT_PSE_PRIMS_REVERSED,
+ VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
+ VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
+ VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
+ VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
+ VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
+ VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
+ VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
+ VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
+ VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
+ VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
+ VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
+ VC4_PERFCNT_NUM_EVENTS,
+};
+
+#define DRM_VC4_MAX_PERF_COUNTERS 16
+
+struct drm_vc4_perfmon_create {
+ __u32 id;
+ __u32 ncounters;
+ __u8 events[DRM_VC4_MAX_PERF_COUNTERS];
+};
+
+struct drm_vc4_perfmon_destroy {
+ __u32 id;
+};
+
+/*
+ * Returns the values of the performance counters tracked by this
+ * perfmon (as an array of ncounters u64 values).
+ *
+ * No implicit synchronization is performed, so the user has to
+ * guarantee that any jobs using this perfmon have already been
+ * completed (probably by blocking on the seqno returned by the
+ * last exec that used the perfmon).
+ */
+struct drm_vc4_perfmon_get_values {
+ __u32 id;
+ __u64 values_ptr;
+};
+
#if defined(__cplusplus)
}
#endif
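The drm_vc4_perfmon_get_values comment requires every job using a perfmon to have completed before its values are read back. A hedged sketch of the create / attach / read flow; the open vc4 fd and the already-filled submit argument are assumptions, and waiting on submit->seqno is elided:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/vc4_drm.h>	/* or libdrm's copy of this header */

static void perfmon_roundtrip(int fd, struct drm_vc4_submit_cl *submit)
{
	struct drm_vc4_perfmon_create create = {
		.ncounters = 2,
		.events = {
			VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
			VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
		},
	};
	struct drm_vc4_perfmon_destroy destroy;
	struct drm_vc4_perfmon_get_values get;
	__u64 values[2];

	ioctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create);	/* fills create.id */
	submit->perfmonid = create.id;
	ioctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, submit);
	/* ... block on submit->seqno before reading the counters ... */

	get.id = create.id;
	get.values_ptr = (uintptr_t)values;
	ioctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);

	destroy.id = create.id;
	ioctl(fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &destroy);
}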
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 8bbbcb5cd94b..820de5d222d2 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -30,6 +30,7 @@
*/
#define ETH_ALEN 6 /* Octets in one ethernet addr */
+#define ETH_TLEN 2 /* Octets in ethernet type field */
#define ETH_HLEN 14 /* Total octets in header. */
#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
#define ETH_DATA_LEN 1500 /* Max. octets in payload */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index f4cab5b3ba9a..b4f5073dbac2 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -107,8 +107,6 @@ struct kfd_ioctl_get_clock_counters_args {
__u32 pad;
};
-#define NUM_OF_SUPPORTED_GPUS 7
-
struct kfd_process_device_apertures {
__u64 lds_base; /* from KFD */
__u64 lds_limit; /* from KFD */
@@ -120,6 +118,12 @@ struct kfd_process_device_apertures {
__u32 pad;
};
+/*
+ * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
+ * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
+ * unlimited number of GPUs.
+ */
+#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
struct kfd_process_device_apertures
process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
@@ -129,6 +133,19 @@ struct kfd_ioctl_get_process_apertures_args {
__u32 pad;
};
+struct kfd_ioctl_get_process_apertures_new_args {
+ /* User allocated. Pointer to struct kfd_process_device_apertures
+ * filled in by Kernel
+ */
+ __u64 kfd_process_device_apertures_ptr;
+ /* to KFD - indicates amount of memory present in
+ * kfd_process_device_apertures_ptr
+ * from KFD - Number of entries filled by KFD.
+ */
+ __u32 num_of_nodes;
+ __u32 pad;
+};
+
#define MAX_ALLOWED_NUM_POINTS 100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128
@@ -263,10 +280,90 @@ struct kfd_ioctl_get_tile_config_args {
};
struct kfd_ioctl_set_trap_handler_args {
- uint64_t tba_addr; /* to KFD */
- uint64_t tma_addr; /* to KFD */
- uint32_t gpu_id; /* to KFD */
- uint32_t pad;
+ __u64 tba_addr; /* to KFD */
+ __u64 tma_addr; /* to KFD */
+ __u32 gpu_id; /* to KFD */
+ __u32 pad;
+};
+
+struct kfd_ioctl_acquire_vm_args {
+ __u32 drm_fd; /* to KFD */
+ __u32 gpu_id; /* to KFD */
+};
+
+/* Allocation flags: memory types */
+#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1 << 0)
+#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1)
+#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2)
+#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
+/* Allocation flags: attributes/access options */
+#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
+#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
+#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
+#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
+#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
+#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
+
+/* Allocate memory for later SVM (shared virtual memory) mapping.
+ *
+ * @va_addr: virtual address of the memory to be allocated
+ * all later mappings on all GPUs will use this address
+ * @size: size in bytes
+ * @handle: buffer handle returned to user mode, used to refer to
+ * this allocation for mapping, unmapping and freeing
+ * @mmap_offset: for CPU-mapping the allocation by mmapping a render node
+ * for userptrs this is overloaded to specify the CPU address
+ * @gpu_id: device identifier
+ * @flags: memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
+ */
+struct kfd_ioctl_alloc_memory_of_gpu_args {
+ __u64 va_addr; /* to KFD */
+ __u64 size; /* to KFD */
+ __u64 handle; /* from KFD */
+ __u64 mmap_offset; /* to KFD (userptr), from KFD (mmap offset) */
+ __u32 gpu_id; /* to KFD */
+ __u32 flags;
+};
+
+/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
+ *
+ * @handle: memory handle returned by alloc
+ */
+struct kfd_ioctl_free_memory_of_gpu_args {
+ __u64 handle; /* to KFD */
+};
+
+/* Map memory to one or more GPUs
+ *
+ * @handle: memory handle returned by alloc
+ * @device_ids_array_ptr: array of gpu_ids (__u32 per device)
+ * @n_devices: number of devices in the array
+ * @n_success: number of devices mapped successfully
+ *
+ * @n_success returns information to the caller how many devices from
+ * the start of the array have mapped the buffer successfully. It can
+ * be passed into a subsequent retry call to skip those devices. For
+ * the first call the caller should initialize it to 0.
+ *
+ * If the ioctl completes with return code 0 (success), n_success ==
+ * n_devices.
+ */
+struct kfd_ioctl_map_memory_to_gpu_args {
+ __u64 handle; /* to KFD */
+ __u64 device_ids_array_ptr; /* to KFD */
+ __u32 n_devices; /* to KFD */
+ __u32 n_success; /* to/from KFD */
+};
+
+/* Unmap memory from one or more GPUs
+ *
+ * same arguments as for mapping
+ */
+struct kfd_ioctl_unmap_memory_from_gpu_args {
+ __u64 handle; /* to KFD */
+ __u64 device_ids_array_ptr; /* to KFD */
+ __u32 n_devices; /* to KFD */
+ __u32 n_success; /* to/from KFD */
};
#define AMDKFD_IOCTL_BASE 'K'
@@ -332,7 +429,26 @@ struct kfd_ioctl_set_trap_handler_args {
#define AMDKFD_IOC_SET_TRAP_HANDLER \
AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
+#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW \
+ AMDKFD_IOWR(0x14, \
+ struct kfd_ioctl_get_process_apertures_new_args)
+
+#define AMDKFD_IOC_ACQUIRE_VM \
+ AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
+
+#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU \
+ AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
+
+#define AMDKFD_IOC_FREE_MEMORY_OF_GPU \
+ AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
+
+#define AMDKFD_IOC_MAP_MEMORY_TO_GPU \
+ AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
+
+#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
+ AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x14
+#define AMDKFD_COMMAND_END 0x1A
#endif
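The kfd_ioctl_map_memory_to_gpu_args comment defines a retry protocol: n_success starts at 0 and, because the kernel updates it, a retried call skips the devices that were already mapped. A hedged userspace sketch of that loop; the fd, handle and gpu_ids[] are assumed to come from earlier KFD ioctls:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int map_to_all_gpus(int kfd_fd, __u64 handle,
			   __u32 *gpu_ids, __u32 n_devices)
{
	struct kfd_ioctl_map_memory_to_gpu_args args = {
		.handle = handle,
		.device_ids_array_ptr = (uintptr_t)gpu_ids,
		.n_devices = n_devices,
		.n_success = 0,			/* first call starts at 0 */
	};
	int ret;

	do {
		/* On retry, args.n_success tells the kernel where to resume. */
		ret = ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args);
	} while (ret && errno == EINTR);

	return ret;	/* 0 implies args.n_success == n_devices */
}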
diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index 4fe580d36e41..f5bf06ecd87d 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -54,7 +54,6 @@
#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW)
#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE)
#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2)
-#define LIRC_CAN_SEND_SCANCODE LIRC_MODE2SEND(LIRC_MODE_SCANCODE)
#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE)
#define LIRC_CAN_SEND_MASK 0x0000003f
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index e0739a1aa4b2..912b85b52344 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -380,10 +380,14 @@ struct perf_event_attr {
__u32 bp_type;
union {
__u64 bp_addr;
+ __u64 kprobe_func; /* for perf_kprobe */
+ __u64 uprobe_path; /* for perf_uprobe */
__u64 config1; /* extension of config */
};
union {
__u64 bp_len;
+ __u64 kprobe_addr; /* when kprobe_func == NULL */
+ __u64 probe_offset; /* for perf_[k,u]probe */
__u64 config2; /* extension of config1 */
};
__u64 branch_sample_type; /* enum perf_branch_sample_type */
@@ -444,17 +448,18 @@ struct perf_event_query_bpf {
/*
* Ioctls that can be done on a perf event fd:
*/
-#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
-#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
-#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
-#define PERF_EVENT_IOC_RESET _IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
-#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
-#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
-#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
-#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
-#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
-#define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *)
+#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
+#define PERF_EVENT_IOC_RESET _IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
+#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *)
+#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
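With these fields a kprobe can be attached straight through perf_event_open() via the dynamic "kprobe" PMU rather than the ftrace interface. A hedged sketch; reading the PMU type from /sys/bus/event_source/devices/kprobe/type is an assumption of this example and is left to the caller:

#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

static int open_kprobe_event(__u32 kprobe_pmu_type, const char *symbol)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = kprobe_pmu_type;		/* dynamic PMU type from sysfs */
	attr.kprobe_func = (uintptr_t)symbol;	/* union with config1 */
	attr.probe_offset = 0;			/* union with config2 */

	return syscall(__NR_perf_event_open, &attr,
		       -1 /* pid */, 0 /* cpu */, -1 /* group_fd */, 0 /* flags */);
}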
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 17a022c5b414..da3315ed1bcd 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -370,7 +370,7 @@ static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_d
{
return (protocol == UAC_VERSION_1) ?
desc->baSourceID[desc->bNrInPins + 4] :
- desc->baSourceID[desc->bNrInPins + 6];
+ 2; /* in UAC2, this value is constant */
}
static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
@@ -378,7 +378,7 @@ static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_de
{
return (protocol == UAC_VERSION_1) ?
&desc->baSourceID[desc->bNrInPins + 5] :
- &desc->baSourceID[desc->bNrInPins + 7];
+ &desc->baSourceID[desc->bNrInPins + 6];
}
static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
diff --git a/include/uapi/misc/ocxl.h b/include/uapi/misc/ocxl.h
index 4b0b0b756f3e..0af83d80fb3e 100644
--- a/include/uapi/misc/ocxl.h
+++ b/include/uapi/misc/ocxl.h
@@ -32,6 +32,22 @@ struct ocxl_ioctl_attach {
__u64 reserved3;
};
+struct ocxl_ioctl_metadata {
+ __u16 version; // struct version, always backwards compatible
+
+ // Version 0 fields
+ __u8 afu_version_major;
+ __u8 afu_version_minor;
+ __u32 pasid; // PASID assigned to the current context
+
+ __u64 pp_mmio_size; // Per PASID MMIO size
+ __u64 global_mmio_size;
+
+ // End version 0 fields
+
+ __u64 reserved[13]; // Total of 16*u64
+};
+
struct ocxl_ioctl_irq_fd {
__u64 irq_offset;
__s32 eventfd;
@@ -45,5 +61,6 @@ struct ocxl_ioctl_irq_fd {
#define OCXL_IOCTL_IRQ_ALLOC _IOR(OCXL_MAGIC, 0x11, __u64)
#define OCXL_IOCTL_IRQ_FREE _IOW(OCXL_MAGIC, 0x12, __u64)
#define OCXL_IOCTL_IRQ_SET_FD _IOW(OCXL_MAGIC, 0x13, struct ocxl_ioctl_irq_fd)
+#define OCXL_IOCTL_GET_METADATA _IOR(OCXL_MAGIC, 0x14, struct ocxl_ioctl_metadata)
#endif /* _UAPI_MISC_OCXL_H */
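The versioned layout is meant to stay backwards compatible: callers check the version field and only read the fields their version defines, while the reserved words keep the struct at 16 * u64. A hedged sketch of calling the new ioctl on an open ocxl context fd; the installed header path is an assumption:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/ocxl.h>

static void print_afu_metadata(int fd)
{
	struct ocxl_ioctl_metadata md;

	memset(&md, 0, sizeof(md));
	if (ioctl(fd, OCXL_IOCTL_GET_METADATA, &md))
		return;

	/* Version 0 fields are always present; newer versions only append. */
	printf("AFU %u.%u, PASID %u, per-PASID MMIO %llu bytes\n",
	       (unsigned int)md.afu_version_major,
	       (unsigned int)md.afu_version_minor,
	       (unsigned int)md.pasid,
	       (unsigned long long)md.pp_mmio_size);
}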