author      Linus Torvalds <torvalds@linux-foundation.org>   2012-10-11 09:04:23 +0900
committer   Linus Torvalds <torvalds@linux-foundation.org>   2012-10-11 09:04:23 +0900
commit      ce40be7a820bb393ac4ac69865f018d2f4038cf0
tree        b1fe5a93346eb06f22b1c303d63ec5456d7212ab /include/linux/percpu-rwsem.h
parent      ba0a5a36f60e4c1152af3a2ae2813251974405bf
parent      02f3939e1a9357b7c370a4a69717cf9c02452737
Merge branch 'for-3.7/core' of git://git.kernel.dk/linux-block
Pull block IO update from Jens Axboe:
"Core block IO bits for 3.7. Not a huge round this time, it contains:
- First series from Kent cleaning up and generalizing bio allocation
and freeing.
- WRITE_SAME support from Martin.
- Mikulas patches to prevent O_DIRECT crashes when someone changes
the block size of a device.
- Make bio_split() work on data-less bios (like trim/discards).
- A few other minor fixups."
Fixed up silent semantic mis-merge as per Mikulas Patocka and Andrew
Morton. It is due to the VM no longer using a prio-tree (see commit
6b2dbba8b6ac: "mm: replace vma prio_tree with an interval tree").
So make set_blocksize() use mapping_mapped() instead of open-coding the
internal VM knowledge that has changed.
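For context, mapping_mapped() simply reports whether any VMA currently maps the file, which is exactly the question set_blocksize() needs answered. Below is a minimal sketch of the shape of that fix; the function name set_blocksize_sketch and the elided locking are illustrative, not the verbatim fs/block_dev.c code from this merge.

#include <linux/fs.h>
#include <linux/pagemap.h>      /* mapping_mapped() */

/*
 * Sketch: refuse to change a block device's block size while the
 * device node is memory-mapped, asking the VM through the generic
 * mapping_mapped() helper rather than open-coding its data structures.
 */
static int set_blocksize_sketch(struct block_device *bdev, int size)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping_mapped(mapping))
                return -EBUSY;  /* a live mapping exists; bail out */

        /* ... validate size and update bdev->bd_block_size ... */
        return 0;
}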
* 'for-3.7/core' of git://git.kernel.dk/linux-block: (26 commits)
block: makes bio_split support bio without data
scatterlist: refactor the sg_nents
scatterlist: add sg_nents
fs: fix include/percpu-rwsem.h export error
percpu-rw-semaphore: fix documentation typos
fs/block_dev.c:1644:5: sparse: symbol 'blkdev_mmap' was not declared
blockdev: turn a rw semaphore into a percpu rw semaphore
Fix a crash when block device is read and block size is changed at the same time
block: fix request_queue->flags initialization
block: lift the initial queue bypass mode on blk_register_queue() instead of blk_init_allocated_queue()
block: ioctl to zero block ranges (see the userspace sketch after this list)
block: Make blkdev_issue_zeroout use WRITE SAME
block: Implement support for WRITE SAME
block: Consolidate command flag and queue limit checks for merges
block: Clean up special command handling logic
block/blk-tag.c: Remove useless kfree
block: remove the duplicated setting for congestion_threshold
block: reject invalid queue attribute values
block: Add bio_clone_bioset(), bio_clone_kmalloc()
block: Consolidate bio_alloc_bioset(), bio_kmalloc()
...
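One user-visible piece of the list above is the BLKZEROOUT ioctl added by "block: ioctl to zero block ranges". The userspace sketch below shows how it can be driven; the device path and range are illustrative. Per the series, the argument is a pair of byte values (start, length), both 512-byte aligned, and the kernel discharges the request through blkdev_issue_zeroout(), which uses WRITE SAME when the hardware supports it.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* BLKZEROOUT */

int main(void)
{
        /* illustrative device and range: zero the first 1 MiB */
        uint64_t range[2] = { 0, 1 << 20 };     /* start, length in bytes */
        int fd = open("/dev/sdX", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, BLKZEROOUT, range) < 0)
                perror("BLKZEROOUT");
        close(fd);
        return 0;
}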
Diffstat (limited to 'include/linux/percpu-rwsem.h')
-rw-r--r--   include/linux/percpu-rwsem.h   89
1 file changed, 89 insertions, 0 deletions
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
new file mode 100644
index 000000000000..cf80f7e5277f
--- /dev/null
+++ b/include/linux/percpu-rwsem.h
@@ -0,0 +1,89 @@
+#ifndef _LINUX_PERCPU_RWSEM_H
+#define _LINUX_PERCPU_RWSEM_H
+
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/delay.h>
+
+struct percpu_rw_semaphore {
+        unsigned __percpu *counters;
+        bool locked;
+        struct mutex mtx;
+};
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *p)
+{
+        rcu_read_lock();
+        if (unlikely(p->locked)) {
+                rcu_read_unlock();
+                mutex_lock(&p->mtx);
+                this_cpu_inc(*p->counters);
+                mutex_unlock(&p->mtx);
+                return;
+        }
+        this_cpu_inc(*p->counters);
+        rcu_read_unlock();
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *p)
+{
+        /*
+         * On X86, write operation in this_cpu_dec serves as a memory unlock
+         * barrier (i.e. memory accesses may be moved before the write, but
+         * no memory accesses are moved past the write).
+         * On other architectures this may not be the case, so we need smp_mb()
+         * there.
+         */
+#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
+        barrier();
+#else
+        smp_mb();
+#endif
+        this_cpu_dec(*p->counters);
+}
+
+static inline unsigned __percpu_count(unsigned __percpu *counters)
+{
+        unsigned total = 0;
+        int cpu;
+
+        for_each_possible_cpu(cpu)
+                total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));
+
+        return total;
+}
+
+static inline void percpu_down_write(struct percpu_rw_semaphore *p)
+{
+        mutex_lock(&p->mtx);
+        p->locked = true;
+        synchronize_rcu();
+        while (__percpu_count(p->counters))
+                msleep(1);
+        smp_rmb(); /* paired with smp_mb() in percpu_up_read() */
+}
+
+static inline void percpu_up_write(struct percpu_rw_semaphore *p)
+{
+        p->locked = false;
+        mutex_unlock(&p->mtx);
+}
+
+static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
+{
+        p->counters = alloc_percpu(unsigned);
+        if (unlikely(!p->counters))
+                return -ENOMEM;
+        p->locked = false;
+        mutex_init(&p->mtx);
+        return 0;
+}
+
+static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
+{
+        free_percpu(p->counters);
+        p->counters = NULL; /* catch use after free bugs */
+}
+
+#endif
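The header above is deliberately asymmetric: percpu_down_read() normally costs only rcu_read_lock() plus a per-CPU increment, so concurrent readers never write a shared cache line, while percpu_down_write() pays with synchronize_rcu() and counter polling. That trade fits this merge, where readers are every block I/O and the writer is a rare block-size change. A hedged usage sketch follows; struct my_device and the surrounding function names are invented for illustration (the series applies the same pattern to the block device's block-size semaphore), but the percpu_* calls are exactly the API defined above.

#include <linux/percpu-rwsem.h>

struct my_device {                              /* hypothetical driver state */
        struct percpu_rw_semaphore size_sem;
        unsigned int block_size;
};

static int my_device_init(struct my_device *dev)
{
        dev->block_size = 512;
        return percpu_init_rwsem(&dev->size_sem);       /* -ENOMEM on failure */
}

/* Hot path: taken for every I/O; normally just RCU plus this_cpu_inc. */
static void my_device_do_io(struct my_device *dev)
{
        percpu_down_read(&dev->size_sem);
        /* ... issue I/O that relies on dev->block_size staying stable ... */
        percpu_up_read(&dev->size_sem);
}

/* Cold path: sets p->locked, then waits for all per-CPU counters to drain. */
static void my_device_set_block_size(struct my_device *dev, unsigned int size)
{
        percpu_down_write(&dev->size_sem);
        dev->block_size = size;
        percpu_up_write(&dev->size_sem);
}

static void my_device_exit(struct my_device *dev)
{
        percpu_free_rwsem(&dev->size_sem);
}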