path: root/include/linux
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-11-23 08:21:37 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-11-23 08:21:37 +0100
commit	03c1136af504bbc2cabda76af6b27fd5f7cf8a7d (patch)
tree	6c53a0f635b4339571050006191a950e3f5db93c /include/linux
parent	0d79a48440f559ac939d1be2089757c5e4ab16c7 (diff)
parent	418baf2c28f3473039f2f7377760bd8f6897ae18 (diff)
download	linux-stable-03c1136af504bbc2cabda76af6b27fd5f7cf8a7d.tar.gz / .tar.bz2 / .zip
Merge 5.10-rc5 into staging-testing
We want the staging/IIO fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/blk-mq.h              |   2
-rw-r--r--  include/linux/can/skb.h             |  20
-rw-r--r--  include/linux/compiler-clang.h      |   8
-rw-r--r--  include/linux/compiler-gcc.h        |  21
-rw-r--r--  include/linux/compiler.h            |  18
-rw-r--r--  include/linux/compiler_types.h      |   4
-rw-r--r--  include/linux/cpufreq.h             |  18
-rw-r--r--  include/linux/filter.h              |  22
-rw-r--r--  include/linux/fs.h                  |  38
-rw-r--r--  include/linux/genhd.h               |   2
-rw-r--r--  include/linux/intel-iommu.h         |   1
-rw-r--r--  include/linux/io_uring.h            |   3
-rw-r--r--  include/linux/iomap.h               |   2
-rw-r--r--  include/linux/jbd2.h                |  25
-rw-r--r--  include/linux/memcontrol.h          |  11
-rw-r--r--  include/linux/memory_hotplug.h      |  14
-rw-r--r--  include/linux/mm.h                  |   9
-rw-r--r--  include/linux/netfilter/nfnetlink.h |   9
-rw-r--r--  include/linux/netfilter_ipv4.h      |   2
-rw-r--r--  include/linux/netfilter_ipv6.h      |  10
-rw-r--r--  include/linux/numa.h                |  30
-rw-r--r--  include/linux/pagemap.h             |  10
-rw-r--r--  include/linux/perf_event.h          |  13
-rw-r--r--  include/linux/perf_regs.h           |   6
-rw-r--r--  include/linux/pgtable.h             |   4
-rw-r--r--  include/linux/phy.h                 |  40
-rw-r--r--  include/linux/pm_runtime.h          |  27
-rw-r--r--  include/linux/refcount.h            | 130
-rw-r--r--  include/linux/sched.h               |  26
-rw-r--r--  include/linux/seq_file.h            |   1
-rw-r--r--  include/linux/spi/spi.h             |  19
-rw-r--r--  include/linux/swiotlb.h             |  11
32 files changed, 317 insertions(+), 239 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b23eeca4d677..794b2a33a2c3 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -235,6 +235,8 @@ enum hctx_type {
* @flags: Zero or more BLK_MQ_F_* flags.
* @driver_data: Pointer to data owned by the block driver that created this
* tag set.
+ * @active_queues_shared_sbitmap:
+ * number of active request queues per tag set.
* @__bitmap_tags: A shared tags sbitmap, used over all hctx's
* @__breserved_tags:
* A shared reserved tags sbitmap, used over all hctx's
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 900b9f4e0605..fc61cf4eff1c 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -61,21 +61,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
*/
static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
- if (skb_shared(skb)) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+ struct sk_buff *nskb;
- if (likely(nskb)) {
- can_skb_set_owner(nskb, skb->sk);
- consume_skb(skb);
- return nskb;
- } else {
- kfree_skb(skb);
- return NULL;
- }
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ kfree_skb(skb);
+ return NULL;
}
- /* we can assume to have an unshared skb with proper owner */
- return skb;
+ can_skb_set_owner(nskb, skb->sk);
+ consume_skb(skb);
+ return nskb;
}
#endif /* !_CAN_SKB_H */
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 230604e7f057..98cff1b4b088 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -8,8 +8,10 @@
+ __clang_patchlevel__)
#if CLANG_VERSION < 100001
+#ifndef __BPF_TRACING__
# error Sorry, your version of Clang is too old - please use 10.0.1 or newer.
#endif
+#endif
/* Compiler specific definitions for Clang compiler */
@@ -60,12 +62,6 @@
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
-/* The following are for compatibility with GCC, from compiler-gcc.h,
- * and may be redefined here because they should not be shared with other
- * compilers, like ICC.
- */
-#define barrier() __asm__ __volatile__("" : : : "memory")
-
#if __has_feature(shadow_call_stack)
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index d1e3c6896b71..74c6c0486eed 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -15,25 +15,6 @@
# error Sorry, your version of GCC is too old - please use 4.9 or newer.
#endif
-/* Optimization barrier */
-
-/* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
-/*
- * This version is i.e. to prevent dead stores elimination on @ptr
- * where gcc and llvm may behave differently when otherwise using
- * normal barrier(): while gcc behavior gets along with a normal
- * barrier(), llvm needs an explicit input variable to be assumed
- * clobbered. The issue is as follows: while the inline asm might
- * access any memory it wants, the compiler could have fit all of
- * @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proved that the inline asm wasn't touching any of
- * it. This version works well with both compilers, i.e. we're telling
- * the compiler that the inline asm absolutely may see the contents
- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
- */
-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
-
/*
* This macro obfuscates arithmetic on a variable address so that gcc
* shouldn't recognize the original var, and make assumptions about it.
@@ -175,5 +156,3 @@
#else
#define __diag_GCC_8(s)
#endif
-
-#define __no_fgcse __attribute__((optimize("-fno-gcse")))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e512f5505dad..b8fe0c23cfff 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -80,11 +80,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
/* Optimization barrier */
#ifndef barrier
-# define barrier() __memory_barrier()
+/* The "volatile" is due to gcc bugs */
+# define barrier() __asm__ __volatile__("": : :"memory")
#endif
#ifndef barrier_data
-# define barrier_data(ptr) barrier()
+/*
+ * This version is i.e. to prevent dead stores elimination on @ptr
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proved that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
/* workaround for GCC PR82365 if needed */
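For context, the barrier_data() comment above describes the pattern used by the kernel's memzero_explicit(); a minimal sketch (illustrative, not part of this merge):

static inline void zero_sensitive(void *buf, size_t len)
{
	memset(buf, 0, len);
	/* Tell the compiler the asm may read *buf, so the memset()
	 * above cannot be discarded as a dead store by either GCC
	 * or Clang. */
	barrier_data(buf);
}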
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 6e390d58a9f8..ac3fa37a84f9 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -247,10 +247,6 @@ struct ftrace_likely_data {
#define asm_inline asm
#endif
-#ifndef __no_fgcse
-# define __no_fgcse
-#endif
-
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 1eaa04f1bae6..acbad3b36322 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -110,6 +110,12 @@ struct cpufreq_policy {
bool fast_switch_enabled;
/*
+ * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
+ * governor.
+ */
+ bool strict_target;
+
+ /*
* Preferred average time interval between consecutive invocations of
* the driver to set the frequency for this policy. To be set by the
* scaling driver (0, which is the default, means no preference).
@@ -570,12 +576,20 @@ struct cpufreq_governor {
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
- /* For governors which change frequency dynamically by themselves */
- bool dynamic_switching;
struct list_head governor_list;
struct module *owner;
+ u8 flags;
};
+/* Governor flags */
+
+/* For governors which change frequency dynamically by themselves */
+#define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)
+
+/* For governors wanting the target frequency to be set exactly */
+#define CPUFREQ_GOV_STRICT_TARGET BIT(1)
+
+
/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq);
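With dynamic_switching folded into the new flags word, a governor now advertises its behaviour through flag bits; a hypothetical declaration showing the replacement for the removed boolean (names are illustrative):

static struct cpufreq_governor example_gov = {
	.name	= "example",
	/* Replaces the old ".dynamic_switching = true" field; the
	 * strict-target flag asks the driver for exact frequencies. */
	.flags	= CPUFREQ_GOV_DYNAMIC_SWITCHING | CPUFREQ_GOV_STRICT_TARGET,
	.owner	= THIS_MODULE,
};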
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 72d62cbc1578..1b62397bd124 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -558,21 +558,21 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
- u32 ret; \
+ u32 __ret; \
cant_migrate(); \
if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
- struct bpf_prog_stats *stats; \
- u64 start = sched_clock(); \
- ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- stats = this_cpu_ptr(prog->aux->stats); \
- u64_stats_update_begin(&stats->syncp); \
- stats->cnt++; \
- stats->nsecs += sched_clock() - start; \
- u64_stats_update_end(&stats->syncp); \
+ struct bpf_prog_stats *__stats; \
+ u64 __start = sched_clock(); \
+ __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+ __stats = this_cpu_ptr(prog->aux->stats); \
+ u64_stats_update_begin(&__stats->syncp); \
+ __stats->cnt++; \
+ __stats->nsecs += sched_clock() - __start; \
+ u64_stats_update_end(&__stats->syncp); \
} else { \
- ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+ __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
} \
- ret; })
+ __ret; })
#define BPF_PROG_RUN(prog, ctx) \
__BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
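The __-prefixed macro locals avoid capturing identically named variables at the expansion site; a deliberately contrived, hedged illustration of the hazard the renaming prevents (caller code is hypothetical):

u64 start = 0;				/* caller's own variable       */
u32 ret = BPF_PROG_RUN(prog, &start);	/* contrived ctx for the sake  */
					/* of the example              */
/* With the old unprefixed locals, the macro's "u64 start" (declared in
 * the stats branch) would shadow the caller's "start", so the "&start"
 * in the argument expression would bind to the macro's variable. */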
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 21cc971fd960..8667d0cdc71e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1580,8 +1580,24 @@ extern struct timespec64 current_time(struct inode *inode);
* Snapshotting support.
*/
-void __sb_end_write(struct super_block *sb, int level);
-int __sb_start_write(struct super_block *sb, int level, bool wait);
+/*
+ * These are internal functions, please use sb_start_{write,pagefault,intwrite}
+ * instead.
+ */
+static inline void __sb_end_write(struct super_block *sb, int level)
+{
+ percpu_up_read(sb->s_writers.rw_sem + level-1);
+}
+
+static inline void __sb_start_write(struct super_block *sb, int level)
+{
+ percpu_down_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
+{
+ return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
+}
#define __sb_writers_acquired(sb, lev) \
percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
@@ -1645,12 +1661,12 @@ static inline void sb_end_intwrite(struct super_block *sb)
*/
static inline void sb_start_write(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_WRITE, true);
+ __sb_start_write(sb, SB_FREEZE_WRITE);
}
-static inline int sb_start_write_trylock(struct super_block *sb)
+static inline bool sb_start_write_trylock(struct super_block *sb)
{
- return __sb_start_write(sb, SB_FREEZE_WRITE, false);
+ return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
}
/**
@@ -1674,7 +1690,7 @@ static inline int sb_start_write_trylock(struct super_block *sb)
*/
static inline void sb_start_pagefault(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true);
+ __sb_start_write(sb, SB_FREEZE_PAGEFAULT);
}
/*
@@ -1692,12 +1708,12 @@ static inline void sb_start_pagefault(struct super_block *sb)
*/
static inline void sb_start_intwrite(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_FS, true);
+ __sb_start_write(sb, SB_FREEZE_FS);
}
-static inline int sb_start_intwrite_trylock(struct super_block *sb)
+static inline bool sb_start_intwrite_trylock(struct super_block *sb)
{
- return __sb_start_write(sb, SB_FREEZE_FS, false);
+ return __sb_start_write_trylock(sb, SB_FREEZE_FS);
}
@@ -2756,14 +2772,14 @@ static inline void file_start_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
- __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
+ sb_start_write(file_inode(file)->i_sb);
}
static inline bool file_start_write_trylock(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return true;
- return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false);
+ return sb_start_write_trylock(file_inode(file)->i_sb);
}
static inline void file_end_write(struct file *file)
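With __sb_start_write() split into blocking and trylock inlines, callers keep using the public wrappers; a rough usage sketch (the surrounding write path is hypothetical):

/* Blocking form: take freeze protection around a modification. */
sb_start_write(inode->i_sb);
/* ... perform the write ... */
sb_end_write(inode->i_sb);

/* Non-blocking form, now returning bool rather than int: */
if (!sb_start_write_trylock(inode->i_sb))
	return -EAGAIN;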
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 38f23d757013..03da3f603d30 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -315,7 +315,7 @@ static inline int get_disk_ro(struct gendisk *disk)
extern void disk_block_events(struct gendisk *disk);
extern void disk_unblock_events(struct gendisk *disk);
extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
-void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
+bool set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
bool update_bdev);
/* drivers/char/random.c */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index fbf5b3e7707e..d956987ed032 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -798,7 +798,6 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
-extern int intel_iommu_tboot_noforce;
extern int intel_iommu_gfx_mapped;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 868364cea3b7..35b2d845704d 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -30,7 +30,8 @@ struct io_uring_task {
struct percpu_counter inflight;
struct io_identity __identity;
struct io_identity *identity;
- bool in_idle;
+ atomic_t in_idle;
+ bool sqpoll;
};
#if defined(CONFIG_IO_URING)
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 172b3397a1a3..5bd3cac4df9c 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -221,7 +221,7 @@ struct iomap_writeback_ops {
* Optional, allows the file system to discard state on a page where
* we failed to submit any I/O.
*/
- void (*discard_page)(struct page *page);
+ void (*discard_page)(struct page *page, loff_t fileoff);
};
struct iomap_writepage_ctx {
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 1d5566af48ac..578ff196b3ce 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -68,6 +68,7 @@ extern void *jbd2_alloc(size_t size, gfp_t flags);
extern void jbd2_free(void *ptr, size_t size);
#define JBD2_MIN_JOURNAL_BLOCKS 1024
+#define JBD2_MIN_FC_BLOCKS 256
#ifdef __KERNEL__
@@ -400,7 +401,7 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
#define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
/**
- * struct jbd_inode - The jbd_inode type is the structure linking inodes in
+ * struct jbd2_inode - The jbd_inode type is the structure linking inodes in
* ordered mode present in a transaction so that we can sync them during commit.
*/
struct jbd2_inode {
@@ -944,8 +945,9 @@ struct journal_s
/**
* @j_fc_off:
*
- * Number of fast commit blocks currently allocated.
- * [j_state_lock].
+ * Number of fast commit blocks currently allocated. Accessed only
+ * during fast commit. Currently only process can do fast commit, so
+ * this field is not protected by any lock.
*/
unsigned long j_fc_off;
@@ -988,9 +990,9 @@ struct journal_s
struct block_device *j_fs_dev;
/**
- * @j_maxlen: Total maximum capacity of the journal region on disk.
+ * @j_total_len: Total maximum capacity of the journal region on disk.
*/
- unsigned int j_maxlen;
+ unsigned int j_total_len;
/**
* @j_reserved_credits:
@@ -1108,8 +1110,9 @@ struct journal_s
struct buffer_head **j_wbuf;
/**
- * @j_fc_wbuf: Array of fast commit bhs for
- * jbd2_journal_commit_transaction.
+ * @j_fc_wbuf: Array of fast commit bhs for fast commit. Accessed only
+ * during a fast commit. Currently only process can do fast commit, so
+ * this field is not protected by any lock.
*/
struct buffer_head **j_fc_wbuf;
@@ -1614,16 +1617,20 @@ extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
extern int jbd2_cleanup_journal_tail(journal_t *);
/* Fast commit related APIs */
-int jbd2_fc_init(journal_t *journal, int num_fc_blks);
int jbd2_fc_begin_commit(journal_t *journal, tid_t tid);
int jbd2_fc_end_commit(journal_t *journal);
-int jbd2_fc_end_commit_fallback(journal_t *journal, tid_t tid);
+int jbd2_fc_end_commit_fallback(journal_t *journal);
int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
int jbd2_submit_inode_data(struct jbd2_inode *jinode);
int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
int jbd2_fc_release_bufs(journal_t *journal);
+static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal)
+{
+ return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
+}
+
/*
* is_journal_abort
*
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e391e3c56de5..a80c59af2c60 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -900,12 +900,19 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
+ bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
+ event == MEMCG_SWAP_FAIL;
+
atomic_long_inc(&memcg->memory_events_local[event]);
- cgroup_file_notify(&memcg->events_local_file);
+ if (!swap_event)
+ cgroup_file_notify(&memcg->events_local_file);
do {
atomic_long_inc(&memcg->memory_events[event]);
- cgroup_file_notify(&memcg->events_file);
+ if (swap_event)
+ cgroup_file_notify(&memcg->swap_events_file);
+ else
+ cgroup_file_notify(&memcg->events_file);
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
break;
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index d65c6fdc5cfc..551093b74596 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -281,20 +281,6 @@ static inline bool movable_node_is_enabled(void)
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
-#ifdef CONFIG_NUMA
-extern int memory_add_physaddr_to_nid(u64 start);
-extern int phys_to_target_node(u64 start);
-#else
-static inline int memory_add_physaddr_to_nid(u64 start)
-{
- return 0;
-}
-static inline int phys_to_target_node(u64 start)
-{
- return 0;
-}
-#endif
-
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* pgdat resizing functions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ef360fe70aaf..db6ae4d3fb4e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2759,6 +2759,15 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
+#ifndef io_remap_pfn_range
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn,
+ unsigned long size, pgprot_t prot)
+{
+ return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
+}
+#endif
+
static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
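The generic io_remap_pfn_range() is now a real inline that forces a decrypted mapping; a hedged sketch of the usual driver ->mmap() path that lands here (bar_phys and the function name are hypothetical):

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* Map device MMIO into userspace; on SEV/SME systems the
	 * pgprot_decrypted() applied by io_remap_pfn_range() keeps
	 * the mapping unencrypted. */
	return io_remap_pfn_range(vma, vma->vm_start,
				  bar_phys >> PAGE_SHIFT,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}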
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 89016d08f6a2..f6267e2883f2 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -24,6 +24,12 @@ struct nfnl_callback {
const u_int16_t attr_count; /* number of nlattr's */
};
+enum nfnl_abort_action {
+ NFNL_ABORT_NONE = 0,
+ NFNL_ABORT_AUTOLOAD,
+ NFNL_ABORT_VALIDATE,
+};
+
struct nfnetlink_subsystem {
const char *name;
__u8 subsys_id; /* nfnetlink subsystem ID */
@@ -31,7 +37,8 @@ struct nfnetlink_subsystem {
const struct nfnl_callback *cb; /* callback for individual types */
struct module *owner;
int (*commit)(struct net *net, struct sk_buff *skb);
- int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
+ int (*abort)(struct net *net, struct sk_buff *skb,
+ enum nfnl_abort_action action);
void (*cleanup)(struct net *net);
bool (*valid_genid)(struct net *net, u32 genid);
};
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 082e2c41b7ff..5b70ca868bb1 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -16,7 +16,7 @@ struct ip_rt_info {
u_int32_t mark;
};
-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
+int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type);
struct nf_queue_entry;
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 9b67394471e1..48314ade1506 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -42,7 +42,7 @@ struct nf_ipv6_ops {
#if IS_MODULE(CONFIG_IPV6)
int (*chk_addr)(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
- int (*route_me_harder)(struct net *net, struct sk_buff *skb);
+ int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
@@ -143,9 +143,9 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
#endif
}
-int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
+int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb);
-static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb)
{
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
@@ -153,9 +153,9 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
if (!v6_ops)
return -EHOSTUNREACH;
- return v6_ops->route_me_harder(net, skb);
+ return v6_ops->route_me_harder(net, sk, skb);
#elif IS_BUILTIN(CONFIG_IPV6)
- return ip6_route_me_harder(net, skb);
+ return ip6_route_me_harder(net, sk, skb);
#else
return -EHOSTUNREACH;
#endif
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 8cb33ccfb671..cb44cfe2b725 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -21,13 +21,41 @@
#endif
#ifdef CONFIG_NUMA
+#include <linux/printk.h>
+#include <asm/sparsemem.h>
+
/* Generic implementation available */
int numa_map_to_online_node(int node);
-#else
+
+#ifndef memory_add_physaddr_to_nid
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+ pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
+ start);
+ return 0;
+}
+#endif
+#ifndef phys_to_target_node
+static inline int phys_to_target_node(u64 start)
+{
+ pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
+ start);
+ return 0;
+}
+#endif
+#else /* !CONFIG_NUMA */
static inline int numa_map_to_online_node(int node)
{
return NUMA_NO_NODE;
}
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+ return 0;
+}
+static inline int phys_to_target_node(u64 start)
+{
+ return 0;
+}
#endif
#endif /* _LINUX_NUMA_H */
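The pr_info_once() stubs only apply when an architecture has not supplied its own helpers; the override works by defining a macro with the same name in the arch header, roughly like this (pattern only, not taken from this diff):

/* In an arch's asm/sparsemem.h (illustrative): */
int memory_add_physaddr_to_nid(u64 addr);
#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
/* The #define makes the "#ifndef memory_add_physaddr_to_nid" check in
 * linux/numa.h skip the generic stub. */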
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c77b7c31b2e4..d5570deff400 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -344,9 +344,9 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
- * @offset: the page index
+ * @index: the page index
*
- * Looks up the page cache entry at @mapping & @offset. If there is a
+ * Looks up the page cache entry at @mapping & @index. If there is a
* page cache page, it is returned locked and with an increased
* refcount.
*
@@ -363,9 +363,9 @@ static inline struct page *find_lock_page(struct address_space *mapping,
/**
* find_lock_head - Locate, pin and lock a pagecache page.
* @mapping: The address_space to search.
- * @offset: The page index.
+ * @index: The page index.
*
- * Looks up the page cache entry at @mapping & @offset. If there is a
+ * Looks up the page cache entry at @mapping & @index. If there is a
* page cache page, its head page is returned locked and with an increased
* refcount.
*
@@ -906,6 +906,8 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
xas_set(&xas, rac->_index);
rcu_read_lock();
xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
+ if (xas_retry(&xas, page))
+ continue;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageTail(page), page);
array[i++] = page;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 0c19d279b97f..96450f6fb1de 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1022,13 +1022,7 @@ struct perf_sample_data {
struct perf_callchain_entry *callchain;
u64 aux_size;
- /*
- * regs_user may point to task_pt_regs or to regs_user_copy, depending
- * on arch details.
- */
struct perf_regs regs_user;
- struct pt_regs regs_user_copy;
-
struct perf_regs regs_intr;
u64 stack_user_size;
@@ -1400,11 +1394,14 @@ perf_event_addr_filters(struct perf_event *event)
extern void perf_event_addr_filters_sync(struct perf_event *event);
extern int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
- struct perf_event *event,
- unsigned int size);
+ struct perf_sample_data *data,
+ struct perf_event *event,
+ unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event,
unsigned int size);
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 2d12e97d5e7b..f632c5725f16 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -20,8 +20,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx);
int perf_reg_validate(u64 mask);
u64 perf_reg_abi(struct task_struct *task);
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy);
+ struct pt_regs *regs);
#else
#define PERF_REG_EXTENDED_MASK 0
@@ -42,8 +41,7 @@ static inline u64 perf_reg_abi(struct task_struct *task)
}
static inline void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 38c33eabea89..71125a4676c4 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1427,10 +1427,6 @@ typedef unsigned int pgtbl_mod_mask;
#endif /* !__ASSEMBLY__ */
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
-#endif
-
#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
diff --git a/include/linux/phy.h b/include/linux/phy.h
index eb3cb1a98b45..56563e5e0dc7 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -147,16 +147,8 @@ typedef enum {
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
-/**
+/*
* phy_supported_speeds - return all speeds currently supported by a PHY device
- * @phy: The PHY device to return supported speeds of.
- * @speeds: buffer to store supported speeds in.
- * @size: size of speeds buffer.
- *
- * Description: Returns the number of supported speeds, and fills
- * the speeds buffer with the supported speeds. If speeds buffer is
- * too small to contain all currently supported speeds, will return as
- * many speeds as can fit.
*/
unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds,
@@ -1022,14 +1014,9 @@ static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum,
regnum, mask, set);
}
-/**
+/*
* phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Same rules as for phy_read();
*/
int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
@@ -1064,38 +1051,21 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
__ret; \
})
-/**
+/*
* __phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Same rules as for __phy_read();
*/
int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
-/**
+/*
* phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to read
- * @val: value to write to @regnum
- *
- * Same rules as for phy_write();
*/
int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
-/**
+/*
* __phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to read
- * @val: value to write to @regnum
- *
- * Same rules as for __phy_write();
*/
int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 18b02dcc168e..b492ae00cc90 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -54,11 +54,10 @@ extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
-extern void pm_runtime_clean_up_links(struct device *dev);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
-extern void pm_runtime_drop_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device_link *link);
/**
* pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
@@ -276,11 +275,10 @@ static inline u64 pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
-static inline void pm_runtime_clean_up_links(struct device *dev) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
-static inline void pm_runtime_drop_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device_link *link) {}
#endif /* !CONFIG_PM */
@@ -389,6 +387,27 @@ static inline int pm_runtime_get_sync(struct device *dev)
}
/**
+ * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
+ * @dev: Target device.
+ *
+ * Resume @dev synchronously and if that is successful, increment its runtime
+ * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
+ * incremented or a negative error code otherwise.
+ */
+static inline int pm_runtime_resume_and_get(struct device *dev)
+{
+ int ret;
+
+ ret = __pm_runtime_resume(dev, RPM_GET_PUT);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
* pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
* @dev: Target device.
*
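A short usage sketch of the new pm_runtime_resume_and_get() helper in a driver path (the surrounding function is hypothetical):

static int example_do_io(struct device *dev)
{
	int ret;

	/* On failure the usage counter has already been dropped by
	 * pm_runtime_resume_and_get() itself, so no extra put here. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... touch the hardware ... */

	pm_runtime_put(dev);
	return 0;
}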
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 7fabb1af18e0..497990c69b0b 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -147,24 +147,6 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
-/**
- * refcount_add_not_zero - add a value to a refcount unless it is 0
- * @i: the value to add to the refcount
- * @r: the refcount
- *
- * Will saturate at REFCOUNT_SATURATED and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_inc(), or one of its variants, should instead be used to
- * increment a reference count.
- *
- * Return: false if the passed refcount is 0, true otherwise
- */
static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
int old = refcount_read(r);
@@ -183,17 +165,12 @@ static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, in
return old;
}
-static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
-{
- return __refcount_add_not_zero(i, r, NULL);
-}
-
/**
- * refcount_add - add a value to a refcount
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
* @r: the refcount
*
- * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
+ * Will saturate at REFCOUNT_SATURATED and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
@@ -203,7 +180,14 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
* use case in which references are taken and released one at a time. In these
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
*/
+static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
+{
+ return __refcount_add_not_zero(i, r, NULL);
+}
+
static inline void __refcount_add(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_add_relaxed(i, &r->refs);
@@ -217,11 +201,32 @@ static inline void __refcount_add(int i, refcount_t *r, int *oldp)
refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
static inline void refcount_add(int i, refcount_t *r)
{
__refcount_add(i, r, NULL);
}
+static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
+{
+ return __refcount_add_not_zero(1, r, oldp);
+}
+
/**
* refcount_inc_not_zero - increment a refcount unless it is 0
* @r: the refcount to increment
@@ -235,14 +240,14 @@ static inline void refcount_add(int i, refcount_t *r)
*
* Return: true if the increment was successful, false otherwise
*/
-static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
+static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
- return __refcount_add_not_zero(1, r, oldp);
+ return __refcount_inc_not_zero(r, NULL);
}
-static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
+static inline void __refcount_inc(refcount_t *r, int *oldp)
{
- return __refcount_inc_not_zero(r, NULL);
+ __refcount_add(1, r, oldp);
}
/**
@@ -257,14 +262,27 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
* Will WARN if the refcount is 0, as this represents a possible use-after-free
* condition.
*/
-static inline void __refcount_inc(refcount_t *r, int *oldp)
+static inline void refcount_inc(refcount_t *r)
{
- __refcount_add(1, r, oldp);
+ __refcount_inc(r, NULL);
}
-static inline void refcount_inc(refcount_t *r)
+static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
- __refcount_inc(r, NULL);
+ int old = atomic_fetch_sub_release(i, &r->refs);
+
+ if (oldp)
+ *oldp = old;
+
+ if (old == i) {
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ if (unlikely(old < 0 || old - i < 0))
+ refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+
+ return false;
}
/**
@@ -287,27 +305,14 @@ static inline void refcount_inc(refcount_t *r)
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
+static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
- int old = atomic_fetch_sub_release(i, &r->refs);
-
- if (oldp)
- *oldp = old;
-
- if (old == i) {
- smp_acquire__after_ctrl_dep();
- return true;
- }
-
- if (unlikely(old < 0 || old - i < 0))
- refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
-
- return false;
+ return __refcount_sub_and_test(i, r, NULL);
}
-static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
+static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
- return __refcount_sub_and_test(i, r, NULL);
+ return __refcount_sub_and_test(1, r, oldp);
}
/**
@@ -323,26 +328,11 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
-{
- return __refcount_sub_and_test(1, r, oldp);
-}
-
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
return __refcount_dec_and_test(r, NULL);
}
-/**
- * refcount_dec - decrement a refcount
- * @r: the refcount
- *
- * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
- * when saturated at REFCOUNT_SATURATED.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before.
- */
static inline void __refcount_dec(refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(1, &r->refs);
@@ -354,6 +344,16 @@ static inline void __refcount_dec(refcount_t *r, int *oldp)
refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
static inline void refcount_dec(refcount_t *r)
{
__refcount_dec(r, NULL);
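The reshuffle moves each kerneldoc block next to its plain wrapper and adds __-prefixed variants that can report the old count; the common lookup pattern built on the documented API, as a hedged example (struct obj is hypothetical):

struct obj *obj_get(struct obj *obj)
{
	/* Only take a reference if the object is still live. */
	if (obj && !refcount_inc_not_zero(&obj->refs))
		return NULL;
	return obj;
}

void obj_put(struct obj *obj)
{
	if (obj && refcount_dec_and_test(&obj->refs))
		kfree(obj);
}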
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 063cd120b459..76cd21fa5501 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -552,7 +552,6 @@ struct sched_dl_entity {
* overruns.
*/
unsigned int dl_throttled : 1;
- unsigned int dl_boosted : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
@@ -571,6 +570,15 @@ struct sched_dl_entity {
* time.
*/
struct hrtimer inactive_timer;
+
+#ifdef CONFIG_RT_MUTEXES
+ /*
+ * Priority Inheritance. When a DEADLINE scheduling entity is boosted
+ * pi_se points to the donor, otherwise points to the dl_se it belongs
+ * to (the original one/itself).
+ */
+ struct sched_dl_entity *pi_se;
+#endif
};
#ifdef CONFIG_UCLAMP_TASK
@@ -770,7 +778,6 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
- unsigned sched_remote_wakeup:1;
#ifdef CONFIG_PSI
unsigned sched_psi_wake_requeue:1;
#endif
@@ -780,6 +787,21 @@ struct task_struct {
/* Unserialized, strictly 'current' */
+ /*
+ * This field must not be in the scheduler word above due to wakelist
+ * queueing no longer being serialized by p->on_cpu. However:
+ *
+ * p->XXX = X; ttwu()
+ * schedule() if (p->on_rq && ..) // false
+ * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
+ * deactivate_task() ttwu_queue_wakelist())
+ * p->on_rq = 0; p->sched_remote_wakeup = Y;
+ *
+ * guarantees all stores of 'current' are visible before
+ * ->sched_remote_wakeup gets used, so it can be in this word.
+ */
+ unsigned sched_remote_wakeup:1;
+
/* Bit to tell LSMs we're in execve(): */
unsigned in_execve:1;
unsigned in_iowait:1;
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 813614d4b71f..b83b3ae3c877 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -107,6 +107,7 @@ void seq_pad(struct seq_file *m, char c);
char *mangle_path(char *s, const char *p, const char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
+ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter);
loff_t seq_lseek(struct file *, loff_t, int);
int seq_release(struct inode *, struct file *);
int seq_write(struct seq_file *seq, const void *data, size_t len);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 99380c0825db..b390fdac1587 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -734,6 +734,25 @@ static inline struct spi_controller *spi_alloc_slave(struct device *host,
return __spi_alloc_controller(host, size, true);
}
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ unsigned int size,
+ bool slave);
+
+static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
+ unsigned int size)
+{
+ return __devm_spi_alloc_controller(dev, size, false);
+}
+
+static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
+ unsigned int size)
+{
+ if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+ return NULL;
+
+ return __devm_spi_alloc_controller(dev, size, true);
+}
+
extern int spi_register_controller(struct spi_controller *ctlr);
extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
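A hedged probe-time sketch of the new device-managed allocator (driver name and private struct are hypothetical):

static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	/* Freed automatically on probe failure or driver unbind, so no
	 * explicit spi_controller_put() is needed on error paths. */
	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct example_priv));
	if (!ctlr)
		return -ENOMEM;

	return devm_spi_register_controller(&pdev->dev, ctlr);
}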
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 513913ff7486..fbdc65782195 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -5,6 +5,7 @@
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/limits.h>
struct device;
struct page;
@@ -45,13 +46,9 @@ enum dma_sync_target {
SYNC_FOR_DEVICE = 1,
};
-extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
- dma_addr_t tbl_dma_addr,
- phys_addr_t phys,
- size_t mapping_size,
- size_t alloc_size,
- enum dma_data_direction dir,
- unsigned long attrs);
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+ size_t mapping_size, size_t alloc_size,
+ enum dma_data_direction dir, unsigned long attrs);
extern void swiotlb_tbl_unmap_single(struct device *hwdev,
phys_addr_t tlb_addr,