author     Linus Torvalds <torvalds@linux-foundation.org>   2019-03-07 19:25:37 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-03-07 19:25:37 -0800
commit     b5dd0c658c31b469ccff1b637e5124851e7a4a1c (patch)
tree       dc0b31a5aa62bb4e1fa653a4f176c2faae51f9e0 /fs
parent     610cd4eadec4f97acd25d3108b0e50d1362b3319 (diff)
parent     fe0436e10c8845aed24cad3a1c719efcd6e583eb (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
- some of the rest of MM
- various misc things
- dynamic-debug updates
- checkpatch
- some epoll speedups
- autofs
- rapidio
- lib/, lib/lzo/ updates
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (83 commits)
samples/mic/mpssd/mpssd.h: remove duplicate header
kernel/fork.c: remove duplicated include
include/linux/relay.h: fix percpu annotation in struct rchan
arch/nios2/mm/fault.c: remove duplicate include
unicore32: stop printing the virtual memory layout
MAINTAINERS: fix GTA02 entry and mark as orphan
mm: create the new vm_fault_t type
arm, s390, unicore32: remove oneliner wrappers for memblock_alloc()
arch: simplify several early memory allocations
openrisc: simplify pte_alloc_one_kernel()
sh: prefer memblock APIs returning virtual address
microblaze: prefer memblock API returning virtual address
powerpc: prefer memblock APIs returning virtual address
lib/lzo: separate lzo-rle from lzo
lib/lzo: implement run-length encoding
lib/lzo: fast 8-byte copy on arm64
lib/lzo: 64-bit CTZ on arm64
lib/lzo: tidy-up ifdefs
ipc/sem.c: replace kvmalloc/memset with kvzalloc and use struct_size
ipc: annotate implicit fall through
...
Diffstat (limited to 'fs')

-rw-r--r--  fs/autofs/autofs_i.h |   3
-rw-r--r--  fs/autofs/inode.c    |  19
-rw-r--r--  fs/binfmt_elf.c      |  32
-rw-r--r--  fs/btrfs/ctree.h     |  34
-rw-r--r--  fs/eventpoll.c       | 173
-rw-r--r--  fs/exec.c            |   9
-rw-r--r--  fs/fat/file.c        |   1
-rw-r--r--  fs/namei.c           |   2

8 files changed, 168 insertions(+), 105 deletions(-)
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 3e59f0ed777b..70c132acdab1 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -105,6 +105,7 @@ struct autofs_wait_queue {
 
 #define AUTOFS_SBI_CATATONIC    0x0001
 #define AUTOFS_SBI_STRICTEXPIRE 0x0002
+#define AUTOFS_SBI_IGNORE       0x0004
 
 struct autofs_sb_info {
         u32 magic;
@@ -215,6 +216,8 @@ static inline int autofs_prepare_pipe(struct file *pipe)
                 return -EINVAL;
         /* We want a packet pipe */
         pipe->f_flags |= O_DIRECT;
+        /* We don't expect -EAGAIN */
+        pipe->f_flags &= ~O_NONBLOCK;
         return 0;
 }
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 078992eee299..80597b88718b 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -82,18 +82,20 @@ static int autofs_show_options(struct seq_file *m, struct dentry *root)
         seq_printf(m, ",maxproto=%d", sbi->max_proto);
 
         if (autofs_type_offset(sbi->type))
-                seq_printf(m, ",offset");
+                seq_puts(m, ",offset");
         else if (autofs_type_direct(sbi->type))
-                seq_printf(m, ",direct");
+                seq_puts(m, ",direct");
         else
-                seq_printf(m, ",indirect");
+                seq_puts(m, ",indirect");
         if (sbi->flags & AUTOFS_SBI_STRICTEXPIRE)
-                seq_printf(m, ",strictexpire");
+                seq_puts(m, ",strictexpire");
+        if (sbi->flags & AUTOFS_SBI_IGNORE)
+                seq_puts(m, ",ignore");
 #ifdef CONFIG_CHECKPOINT_RESTORE
         if (sbi->pipe)
                 seq_printf(m, ",pipe_ino=%ld", file_inode(sbi->pipe)->i_ino);
         else
-                seq_printf(m, ",pipe_ino=-1");
+                seq_puts(m, ",pipe_ino=-1");
 #endif
         return 0;
 }
@@ -111,7 +113,8 @@ static const struct super_operations autofs_sops = {
 };
 
 enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto,
-        Opt_indirect, Opt_direct, Opt_offset, Opt_strictexpire};
+        Opt_indirect, Opt_direct, Opt_offset, Opt_strictexpire,
+        Opt_ignore};
 
 static const match_table_t tokens = {
         {Opt_fd, "fd=%u"},
@@ -124,6 +127,7 @@ static const match_table_t tokens = {
         {Opt_direct, "direct"},
         {Opt_offset, "offset"},
         {Opt_strictexpire, "strictexpire"},
+        {Opt_ignore, "ignore"},
         {Opt_err, NULL}
 };
 
@@ -206,6 +210,9 @@ static int parse_options(char *options,
                 case Opt_strictexpire:
                         sbi->flags |= AUTOFS_SBI_STRICTEXPIRE;
                         break;
+                case Opt_ignore:
+                        sbi->flags |= AUTOFS_SBI_IGNORE;
+                        break;
                 default:
                         return 1;
                 }
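The autofs hunks above thread a new "ignore" mount option through the kernel's token-table machinery: one new flag bit, one enum value, one match_table_t entry, and one switch case in parse_options(). A minimal userspace sketch of that same pattern follows; match_opt() is a simplified stand-in for the kernel's match_token(), and all names here are illustrative rather than the kernel API:

#include <stdio.h>
#include <string.h>

enum { Opt_err, Opt_strictexpire, Opt_ignore };

static const struct { int token; const char *pattern; } tokens[] = {
        { Opt_strictexpire, "strictexpire" },
        { Opt_ignore,       "ignore" },         /* the new option */
        { Opt_err,          NULL },
};

#define SBI_STRICTEXPIRE 0x0002
#define SBI_IGNORE       0x0004

static int match_opt(const char *s)
{
        for (int i = 0; tokens[i].pattern; i++)
                if (!strcmp(s, tokens[i].pattern))
                        return tokens[i].token;
        return Opt_err;
}

int main(void)
{
        char opts[] = "strictexpire,ignore";    /* as in -o strictexpire,ignore */
        unsigned int flags = 0;

        for (char *p = strtok(opts, ","); p; p = strtok(NULL, ","))
                switch (match_opt(p)) {
                case Opt_strictexpire:
                        flags |= SBI_STRICTEXPIRE;
                        break;
                case Opt_ignore:
                        flags |= SBI_IGNORE;
                        break;
                default:
                        return 1;       /* unknown option */
                }

        printf("sbi->flags = 0x%04x\n", flags); /* prints 0x0006 */
        return 0;
}

The kernel version additionally handles patterns with embedded arguments such as "fd=%u"; the flow from token table to switch statement is the same.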
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 54207327f98f..7d09d125f148 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -57,8 +57,6 @@
 #endif
 
 static int load_elf_binary(struct linux_binprm *bprm);
-static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
-                int, int, unsigned long);
 
 #ifdef CONFIG_USELIB
 static int load_elf_library(struct file *);
@@ -347,7 +345,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 #ifndef elf_map
 
 static unsigned long elf_map(struct file *filep, unsigned long addr,
-                struct elf_phdr *eppnt, int prot, int type,
+                const struct elf_phdr *eppnt, int prot, int type,
                 unsigned long total_size)
 {
         unsigned long map_addr;
@@ -387,7 +385,7 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
 
 #endif /* !elf_map */
 
-static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
+static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
 {
         int i, first_idx = -1, last_idx = -1;
 
@@ -414,12 +412,13 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
  * header pointed to by elf_ex, into a newly allocated array. The caller is
  * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
  */
-static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
+static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
                                        struct file *elf_file)
 {
         struct elf_phdr *elf_phdata = NULL;
-        int retval, size, err = -1;
+        int retval, err = -1;
         loff_t pos = elf_ex->e_phoff;
+        unsigned int size;
 
         /*
          * If the size of this structure has changed, then punt, since
@@ -429,13 +428,9 @@ static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
                 goto out;
 
         /* Sanity check the number of program headers... */
-        if (elf_ex->e_phnum < 1 ||
-                elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
-                goto out;
-
         /* ...and their total size. */
         size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
-        if (size > ELF_MIN_ALIGN)
+        if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
                 goto out;
 
         elf_phdata = kmalloc(size, GFP_KERNEL);
@@ -2033,7 +2028,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
                           struct elf_note_info *info,
                           const kernel_siginfo_t *siginfo, struct pt_regs *regs)
 {
-        struct list_head *t;
         struct core_thread *ct;
         struct elf_thread_status *ets;
 
@@ -2050,10 +2044,9 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
                 list_add(&ets->list, &info->thread_list);
         }
 
-        list_for_each(t, &info->thread_list) {
+        list_for_each_entry(ets, &info->thread_list, list) {
                 int sz;
 
-                ets = list_entry(t, struct elf_thread_status, list);
                 sz = elf_dump_thread_status(siginfo->si_signo, ets);
                 info->thread_status_size += sz;
         }
@@ -2117,20 +2110,17 @@ static size_t get_note_info_size(struct elf_note_info *info)
 static int write_note_info(struct elf_note_info *info,
                            struct coredump_params *cprm)
 {
+        struct elf_thread_status *ets;
         int i;
-        struct list_head *t;
 
         for (i = 0; i < info->numnote; i++)
                 if (!writenote(info->notes + i, cprm))
                         return 0;
 
         /* write out the thread status notes section */
-        list_for_each(t, &info->thread_list) {
-                struct elf_thread_status *tmp =
-                                list_entry(t, struct elf_thread_status, list);
-
-                for (i = 0; i < tmp->num_notes; i++)
-                        if (!writenote(&tmp->notes[i], cprm))
+        list_for_each_entry(ets, &info->thread_list, list) {
+                for (i = 0; i < ets->num_notes; i++)
+                        if (!writenote(&ets->notes[i], cprm))
                                 return 0;
         }
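The fill_note_info()/write_note_info() hunks above convert open-coded list_for_each() plus list_entry() walks into list_for_each_entry(), which folds the container_of() arithmetic into the iterator. A self-contained userspace sketch of the difference, using simplified re-implementations of the kernel helpers (GNU C typeof assumed), not the <linux/list.h> originals:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each(pos, head) \
        for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

#define list_for_each_entry(pos, head, member)                           \
        for ((pos) = container_of((head)->next, typeof(*(pos)), member); \
             &(pos)->member != (head);                                   \
             (pos) = container_of((pos)->member.next, typeof(*(pos)), member))

struct elf_thread_status { int num_notes; struct list_head list; };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

int main(void)
{
        struct list_head thread_list = { &thread_list, &thread_list };
        struct elf_thread_status a = { .num_notes = 2 };
        struct elf_thread_status b = { .num_notes = 3 };
        struct elf_thread_status *ets;
        struct list_head *t;

        list_add_tail(&a.list, &thread_list);
        list_add_tail(&b.list, &thread_list);

        /* old style: raw node pointer plus an explicit list_entry() step */
        list_for_each(t, &thread_list)
                printf("old: %d notes\n",
                       container_of(t, struct elf_thread_status, list)->num_notes);

        /* new style: iterate over the containing structs directly */
        list_for_each_entry(ets, &thread_list, list)
                printf("new: %d notes\n", ets->num_notes);

        return 0;
}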
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 85140913c0f5..129d26226e70 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3449,31 +3449,17 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
 
 #if defined(CONFIG_DYNAMIC_DEBUG)
 #define btrfs_debug(fs_info, fmt, args...) \
-do { \
-        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
-        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
-                btrfs_printk(fs_info, KERN_DEBUG fmt, ##args); \
-} while (0)
-#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
-do { \
-        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
-        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
-                btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args); \
-} while (0)
+        _dynamic_func_call_no_desc(fmt, btrfs_printk, \
+                                   fs_info, KERN_DEBUG fmt, ##args)
+#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
+        _dynamic_func_call_no_desc(fmt, btrfs_printk_in_rcu, \
+                                   fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
-do { \
-        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
-        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
-                btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, \
-                                       ##args); \
-} while (0)
-#define btrfs_debug_rl(fs_info, fmt, args...) \
-do { \
-        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
-        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
-                btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, \
-                                         ##args); \
-} while (0)
+        _dynamic_func_call_no_desc(fmt, btrfs_printk_rl_in_rcu, \
+                                   fs_info, KERN_DEBUG fmt, ##args)
+#define btrfs_debug_rl(fs_info, fmt, args...) \
+        _dynamic_func_call_no_desc(fmt, btrfs_printk_ratelimited, \
+                                   fs_info, KERN_DEBUG fmt, ##args)
 #elif defined(DEBUG)
 #define btrfs_debug(fs_info, fmt, args...) \
         btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
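The btrfs hunk above collapses four copies of the DEFINE_DYNAMIC_DEBUG_METADATA / flags-test boilerplate into _dynamic_func_call_no_desc(), which wraps an arbitrary callee behind a per-callsite descriptor. A rough userspace sketch of that shape — hypothetical names throughout, and with the print flag statically enabled so the demo produces output, whereas the real kernel descriptor starts disabled and is toggled at runtime through <debugfs>/dynamic_debug/control:

#include <stdio.h>

struct ddebug { const char *fmt; unsigned int flags; };
#define DD_FLAGS_PRINT 0x1

/* Each expansion gets its own static descriptor, mimicking one
 * dynamic-debug callsite; the wrapped call only runs when the
 * "print" flag is set. */
#define dynamic_func_call_no_desc(fmt, func, args...)              \
do {                                                               \
        static struct ddebug descriptor = { fmt, DD_FLAGS_PRINT }; \
        if (descriptor.flags & DD_FLAGS_PRINT)                     \
                func(args);                                        \
} while (0)

#define my_debug(fmt, args...) \
        dynamic_func_call_no_desc(fmt, printf, "debug: " fmt, ##args)

int main(void)
{
        my_debug("mounted %s\n", "/dev/sda1");
        return 0;
}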
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a5d219d920e7..4a0e98d87fcc 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -50,10 +50,10 @@
  *
  * 1) epmutex (mutex)
  * 2) ep->mtx (mutex)
- * 3) ep->wq.lock (spinlock)
+ * 3) ep->lock (rwlock)
  *
  * The acquire order is the one listed above, from 1 to 3.
- * We need a spinlock (ep->wq.lock) because we manipulate objects
+ * We need a rwlock (ep->lock) because we manipulate objects
  * from inside the poll callback, that might be triggered from
  * a wake_up() that in turn might be called from IRQ context.
  * So we can't sleep inside the poll callback and hence we need
@@ -85,7 +85,7 @@
  * of epoll file descriptors, we use the current recursion depth as
  * the lockdep subkey.
  * It is possible to drop the "ep->mtx" and to use the global
- * mutex "epmutex" (together with "ep->wq.lock") to have it working,
+ * mutex "epmutex" (together with "ep->lock") to have it working,
  * but having "ep->mtx" will make the interface more scalable.
  * Events that require holding "epmutex" are very rare, while for
  * normal operations the epoll private "ep->mtx" will guarantee
@@ -182,8 +182,6 @@ struct epitem {
  * This structure is stored inside the "private_data" member of the file
  * structure and represents the main data structure for the eventpoll
  * interface.
- *
- * Access to it is protected by the lock inside wq.
  */
 struct eventpoll {
         /*
@@ -203,13 +201,16 @@ struct eventpoll {
         /* List of ready file descriptors */
         struct list_head rdllist;
 
+        /* Lock which protects rdllist and ovflist */
+        rwlock_t lock;
+
         /* RB tree root used to store monitored fd structs */
         struct rb_root_cached rbr;
 
         /*
          * This is a single linked list that chains all the "struct epitem" that
          * happened while transferring ready events to userspace w/out
-         * holding ->wq.lock.
+         * holding ->lock.
          */
         struct epitem *ovflist;
 
@@ -697,17 +698,17 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
          * because we want the "sproc" callback to be able to do it
          * in a lockless way.
          */
-        spin_lock_irq(&ep->wq.lock);
+        write_lock_irq(&ep->lock);
         list_splice_init(&ep->rdllist, &txlist);
         WRITE_ONCE(ep->ovflist, NULL);
-        spin_unlock_irq(&ep->wq.lock);
+        write_unlock_irq(&ep->lock);
 
         /*
          * Now call the callback function.
          */
         res = (*sproc)(ep, &txlist, priv);
 
-        spin_lock_irq(&ep->wq.lock);
+        write_lock_irq(&ep->lock);
         /*
          * During the time we spent inside the "sproc" callback, some
          * other events might have been queued by the poll callback.
@@ -722,7 +723,11 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
                  * contain them, and the list_splice() below takes care of them.
                  */
                 if (!ep_is_linked(epi)) {
-                        list_add_tail(&epi->rdllink, &ep->rdllist);
+                        /*
+                         * ->ovflist is LIFO, so we have to reverse it in order
+                         * to keep in FIFO.
+                         */
+                        list_add(&epi->rdllink, &ep->rdllist);
                         ep_pm_stay_awake(epi);
                 }
         }
@@ -745,11 +750,11 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
          * the ->poll() wait list (delayed after we release the lock).
          */
                 if (waitqueue_active(&ep->wq))
-                        wake_up_locked(&ep->wq);
+                        wake_up(&ep->wq);
                 if (waitqueue_active(&ep->poll_wait))
                         pwake++;
         }
-        spin_unlock_irq(&ep->wq.lock);
+        write_unlock_irq(&ep->lock);
 
         if (!ep_locked)
                 mutex_unlock(&ep->mtx);
@@ -789,10 +794,10 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 
         rb_erase_cached(&epi->rbn, &ep->rbr);
 
-        spin_lock_irq(&ep->wq.lock);
+        write_lock_irq(&ep->lock);
         if (ep_is_linked(epi))
                 list_del_init(&epi->rdllink);
-        spin_unlock_irq(&ep->wq.lock);
+        write_unlock_irq(&ep->lock);
 
         wakeup_source_unregister(ep_wakeup_source(epi));
         /*
@@ -842,7 +847,7 @@ static void ep_free(struct eventpoll *ep)
          * Walks through the whole tree by freeing each "struct epitem". At this
          * point we are sure no poll callbacks will be lingering around, and also by
          * holding "epmutex" we can be sure that no file cleanup code will hit
-         * us during this operation. So we can avoid the lock on "ep->wq.lock".
+         * us during this operation. So we can avoid the lock on "ep->lock".
          * We do not need to lock ep->mtx, either, we only do it to prevent
          * a lockdep warning.
          */
@@ -1023,6 +1028,7 @@ static int ep_alloc(struct eventpoll **pep)
                 goto free_uid;
 
         mutex_init(&ep->mtx);
+        rwlock_init(&ep->lock);
         init_waitqueue_head(&ep->wq);
         init_waitqueue_head(&ep->poll_wait);
         INIT_LIST_HEAD(&ep->rdllist);
@@ -1112,21 +1118,107 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
 }
 #endif /* CONFIG_CHECKPOINT_RESTORE */
 
+/**
+ * Adds a new entry to the tail of the list in a lockless way, i.e.
+ * multiple CPUs are allowed to call this function concurrently.
+ *
+ * Beware: it is necessary to prevent any other modifications of the
+ * existing list until all changes are completed, in other words
+ * concurrent list_add_tail_lockless() calls should be protected
+ * with a read lock, where write lock acts as a barrier which
+ * makes sure all list_add_tail_lockless() calls are fully
+ * completed.
+ *
+ * Also an element can be locklessly added to the list only in one
+ * direction i.e. either to the tail either to the head, otherwise
+ * concurrent access will corrupt the list.
+ *
+ * Returns %false if element has been already added to the list, %true
+ * otherwise.
+ */
+static inline bool list_add_tail_lockless(struct list_head *new,
+                                          struct list_head *head)
+{
+        struct list_head *prev;
+
+        /*
+         * This is simple 'new->next = head' operation, but cmpxchg()
+         * is used in order to detect that same element has been just
+         * added to the list from another CPU: the winner observes
+         * new->next == new.
+         */
+        if (cmpxchg(&new->next, new, head) != new)
+                return false;
+
+        /*
+         * Initially ->next of a new element must be updated with the head
+         * (we are inserting to the tail) and only then pointers are atomically
+         * exchanged. XCHG guarantees memory ordering, thus ->next should be
+         * updated before pointers are actually swapped and pointers are
+         * swapped before prev->next is updated.
+         */
+
+        prev = xchg(&head->prev, new);
+
+        /*
+         * It is safe to modify prev->next and new->prev, because a new element
+         * is added only to the tail and new->next is updated before XCHG.
+         */
+
+        prev->next = new;
+        new->prev = prev;
+
+        return true;
+}
+
+/**
+ * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
+ * i.e. multiple CPUs are allowed to call this function concurrently.
+ *
+ * Returns %false if epi element has been already chained, %true otherwise.
+ */
+static inline bool chain_epi_lockless(struct epitem *epi)
+{
+        struct eventpoll *ep = epi->ep;
+
+        /* Check that the same epi has not been just chained from another CPU */
+        if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
+                return false;
+
+        /* Atomically exchange tail */
+        epi->next = xchg(&ep->ovflist, epi);
+
+        return true;
+}
+
 /*
  * This is the callback that is passed to the wait queue wakeup
  * mechanism. It is called by the stored file descriptors when they
  * have events to report.
+ *
+ * This callback takes a read lock in order not to content with concurrent
+ * events from another file descriptors, thus all modifications to ->rdllist
+ * or ->ovflist are lockless.  Read lock is paired with the write lock from
+ * ep_scan_ready_list(), which stops all list modifications and guarantees
+ * that lists state is seen correctly.
+ *
+ * Another thing worth to mention is that ep_poll_callback() can be called
+ * concurrently for the same @epi from different CPUs if poll table was inited
+ * with several wait queues entries.  Plural wakeup from different CPUs of a
+ * single wait queue is serialized by wq.lock, but the case when multiple wait
+ * queues are used should be detected accordingly.  This is detected using
+ * cmpxchg() operation.
  */
 static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
         int pwake = 0;
-        unsigned long flags;
         struct epitem *epi = ep_item_from_wait(wait);
         struct eventpoll *ep = epi->ep;
         __poll_t pollflags = key_to_poll(key);
+        unsigned long flags;
         int ewake = 0;
 
-        spin_lock_irqsave(&ep->wq.lock, flags);
+        read_lock_irqsave(&ep->lock, flags);
 
         ep_set_busy_poll_napi_id(epi);
 
@@ -1155,24 +1247,15 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
          * chained in ep->ovflist and requeued later on.
          */
         if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
-                if (epi->next == EP_UNACTIVE_PTR) {
-                        epi->next = READ_ONCE(ep->ovflist);
-                        WRITE_ONCE(ep->ovflist, epi);
-                        if (epi->ws) {
-                                /*
-                                 * Activate ep->ws since epi->ws may get
-                                 * deactivated at any time.
-                                 */
-                                __pm_stay_awake(ep->ws);
-                        }
-
-                }
+                if (epi->next == EP_UNACTIVE_PTR &&
+                    chain_epi_lockless(epi))
+                        ep_pm_stay_awake_rcu(epi);
                 goto out_unlock;
         }
 
         /* If this file is already in the ready list we exit soon */
-        if (!ep_is_linked(epi)) {
-                list_add_tail(&epi->rdllink, &ep->rdllist);
+        if (!ep_is_linked(epi) &&
+            list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) {
                 ep_pm_stay_awake_rcu(epi);
         }
 
@@ -1197,13 +1280,13 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
                                 break;
                         }
                 }
-                wake_up_locked(&ep->wq);
+                wake_up(&ep->wq);
         }
         if (waitqueue_active(&ep->poll_wait))
                 pwake++;
 
 out_unlock:
-        spin_unlock_irqrestore(&ep->wq.lock, flags);
+        read_unlock_irqrestore(&ep->lock, flags);
 
         /* We have to call this outside the lock */
         if (pwake)
@@ -1488,7 +1571,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
                 goto error_remove_epi;
 
         /* We have to drop the new item inside our item list to keep track of it */
-        spin_lock_irq(&ep->wq.lock);
+        write_lock_irq(&ep->lock);
 
         /* record NAPI ID of new item if present */
         ep_set_busy_poll_napi_id(epi);
@@ -1500,12 +1583,12 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 
                 /* Notify waiting tasks that events are available */
                 if (waitqueue_active(&ep->wq))
-                        wake_up_locked(&ep->wq);
+                        wake_up(&ep->wq);
                 if (waitqueue_active(&ep->poll_wait))
                         pwake++;
         }
 
-        spin_unlock_irq(&ep->wq.lock);
+        write_unlock_irq(&ep->lock);
 
         atomic_long_inc(&ep->user->epoll_watches);
 
@@ -1531,10 +1614,10 @@ error_unregister:
          * list, since that is used/cleaned only inside a section bound by "mtx".
          * And ep_insert() is called with "mtx" held.
          */
-        spin_lock_irq(&ep->wq.lock);
+        write_lock_irq(&ep->lock);
         if (ep_is_linked(epi))
                 list_del_init(&epi->rdllink);
-        spin_unlock_irq(&ep->wq.lock);
+        write_unlock_irq(&ep->lock);
 
         wakeup_source_unregister(ep_wakeup_source(epi));
 
@@ -1578,9 +1661,9 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
          * 1) Flush epi changes above to other CPUs.  This ensures
          *    we do not miss events from ep_poll_callback if an
          *    event occurs immediately after we call f_op->poll().
-         *    We need this because we did not take ep->wq.lock while
+         *    We need this because we did not take ep->lock while
          *    changing epi above (but ep_poll_callback does take
-         *    ep->wq.lock).
+         *    ep->lock).
          *
          * 2) We also need to ensure we do not miss _past_ events
          *    when calling f_op->poll().  This barrier also
@@ -1599,18 +1682,18 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
          * list, push it inside.
          */
         if (ep_item_poll(epi, &pt, 1)) {
-                spin_lock_irq(&ep->wq.lock);
+                write_lock_irq(&ep->lock);
                 if (!ep_is_linked(epi)) {
                         list_add_tail(&epi->rdllink, &ep->rdllist);
                         ep_pm_stay_awake(epi);
 
                         /* Notify waiting tasks that events are available */
                         if (waitqueue_active(&ep->wq))
-                                wake_up_locked(&ep->wq);
+                                wake_up(&ep->wq);
                         if (waitqueue_active(&ep->poll_wait))
                                 pwake++;
                 }
-                spin_unlock_irq(&ep->wq.lock);
+                write_unlock_irq(&ep->lock);
         }
 
         /* We have to call this outside the lock */
@@ -1771,9 +1854,9 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                  */
                 timed_out = 1;
 
-                spin_lock_irq(&ep->wq.lock);
+                write_lock_irq(&ep->lock);
                 eavail = ep_events_available(ep);
-                spin_unlock_irq(&ep->wq.lock);
+                write_unlock_irq(&ep->lock);
 
                 goto send_events;
         }
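The heart of the eventpoll rework above is list_add_tail_lockless(): concurrent ep_poll_callback() invocations append to ->rdllist under the read lock, and the cmpxchg()/xchg() pair keeps the appends from trampling each other. Below is a userspace rendition with GCC __atomic builtins standing in for the kernel's cmpxchg() and xchg(); the cmpxchg claims the element (an unqueued element points ->next at itself, so only one caller can swing it to head), and the xchg on head->prev atomically publishes the new tail. Single-threaded demo of the double-add detection only; the concurrency argument is the kernel comment's, not re-verified here:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static bool list_add_tail_lockless(struct list_head *new,
                                   struct list_head *head)
{
        struct list_head *prev, *expected = new;

        /* Claim the element: only one caller can move ->next off 'new'. */
        if (!__atomic_compare_exchange_n(&new->next, &expected, head,
                                         false, __ATOMIC_ACQ_REL,
                                         __ATOMIC_RELAXED))
                return false;           /* already added by someone else */

        /* Atomically swing the tail pointer to us. */
        prev = __atomic_exchange_n(&head->prev, new, __ATOMIC_ACQ_REL);

        /* Plain stores are safe: elements are only ever added at the tail,
         * so the old tail's forward link is ours alone to set. */
        prev->next = new;
        new->prev = prev;

        return true;
}

int main(void)
{
        struct list_head head = { &head, &head };
        struct list_head elem = { &elem, &elem }; /* self-pointing == not queued */

        printf("first add:  %d\n", list_add_tail_lockless(&elem, &head)); /* 1 */
        printf("second add: %d\n", list_add_tail_lockless(&elem, &head)); /* 0 */
        return 0;
}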
diff --git a/fs/exec.c b/fs/exec.c
index 74f3672146a7..2e0033348d8e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1563,7 +1563,7 @@ static void bprm_fill_uid(struct linux_binprm *bprm)
 
 /*
  * Fill the binprm structure from the inode.
- * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+ * Check permissions, then read the first BINPRM_BUF_SIZE bytes
  *
  * This may be called multiple times for binary chains (scripts for example).
  */
@@ -1944,15 +1944,10 @@ EXPORT_SYMBOL(set_binfmt);
  */
 void set_dumpable(struct mm_struct *mm, int value)
 {
-        unsigned long old, new;
-
         if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
                 return;
 
-        do {
-                old = READ_ONCE(mm->flags);
-                new = (old & ~MMF_DUMPABLE_MASK) | value;
-        } while (cmpxchg(&mm->flags, old, new) != old);
+        set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
 }
 
 SYSCALL_DEFINE3(execve,
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 13935ee99e1e..b3bed32946b1 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -214,6 +214,7 @@ const struct file_operations fat_file_operations = {
 #endif
         .fsync          = fat_file_fsync,
         .splice_read    = generic_file_splice_read,
+        .splice_write   = iter_file_splice_write,
         .fallocate      = fat_fallocate,
 };
diff --git a/fs/namei.c b/fs/namei.c
index 87d7710a2e1d..0a8c5c27f90e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -39,7 +39,6 @@
 #include <linux/bitops.h>
 #include <linux/init_task.h>
 #include <linux/uaccess.h>
-#include <linux/build_bug.h>
 
 #include "internal.h"
 #include "mount.h"
@@ -131,7 +130,6 @@ getname_flags(const char __user *filename, int flags, int *empty)
         struct filename *result;
         char *kname;
         int len;
-        BUILD_BUG_ON(offsetof(struct filename, iname) % sizeof(long) != 0);
 
         result = audit_reusename(filename);
         if (result)
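For reference, the set_mask_bits() helper (include/linux/bitops.h) that replaces the open-coded loop in set_dumpable() above has exactly the semantics of the deleted code: atomically substitute the bits selected by the mask, retrying if another writer races in between. A hedged userspace equivalent using GCC __atomic builtins; the MMF_DUMPABLE_MASK value below is assumed for illustration:

#include <stdio.h>

static unsigned long set_mask_bits(unsigned long *ptr,
                                   unsigned long mask, unsigned long bits)
{
        unsigned long old, new;

        old = __atomic_load_n(ptr, __ATOMIC_RELAXED);
        do {
                new = (old & ~mask) | bits;
                /* on failure, 'old' is refreshed with the current value */
        } while (!__atomic_compare_exchange_n(ptr, &old, new, false,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_RELAXED));
        return old;     /* previous flags, as the kernel helper returns */
}

#define MMF_DUMPABLE_MASK 3UL   /* two low bits, as in the kernel */

int main(void)
{
        unsigned long flags = 0x10 | 0x2;       /* unrelated flag + old value */

        set_mask_bits(&flags, MMF_DUMPABLE_MASK, 1);    /* e.g. SUID_DUMP_USER */
        printf("flags = 0x%lx\n", flags);               /* prints 0x11 */
        return 0;
}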