author | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2013-04-30 15:26:51 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-30 17:04:00 -0700 |
commit | 1e01c968db3d0aebd48e31db15f24516b03128df (patch) | |
tree | 6f8ebed201fdc426a6926b4a2235e8dd2538f025 /mm/frontswap.c | |
parent | 905cd0e1bf9ffe82d6906a01fd974ea0f70be97a (diff) | |
frontswap: make frontswap_init use a pointer for the ops
This simplifies the frontswap code: we can get rid of the separate 'backend_registered' flag and instead test whether the frontswap_ops pointer has been set.
[v1: Rebase on top of 703ba7fe5e0 (ramster->zcache move)]
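The pattern adopted here, letting the ops pointer itself answer "is a backend registered?" and publishing it only after initialization, can be sketched roughly as below. This is an illustrative, simplified sketch with hypothetical names (backend_ops, active_ops, register_backend, MAX_DEVICES, device_needs_init), not the kernel code itself; barrier() stands for the kernel's compiler barrier.

```c
/* Illustrative sketch only (hypothetical names, not mm/frontswap.c itself):
 * a NULL ops pointer doubles as "no backend registered yet", and the pointer
 * is stored only after the deferred init calls have run, so a reader that
 * sees a non-NULL pointer sees a fully initialized backend.
 */
struct backend_ops {
	void (*init)(unsigned type);
};

static struct backend_ops *active_ops;	/* NULL until a backend registers */

struct backend_ops *register_backend(struct backend_ops *ops)
{
	struct backend_ops *old = active_ops;
	unsigned i;

	for (i = 0; i < MAX_DEVICES; i++)	/* replay init for devices that appeared early */
		if (device_needs_init(i))
			ops->init(i);

	barrier();		/* keep the compiler from hoisting the store above the inits */
	active_ops = ops;	/* publishing the pointer is what "registers" the backend */
	return old;		/* previous backend, if any, so nesting can be detected */
}

static bool backend_is_registered(void)
{
	return active_ops != NULL;	/* replaces the old boolean flag */
}
```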
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Andor Daam <andor.daam@googlemail.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Florian Schmaus <fschmaus@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Stefan Hengelein <ilendir@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/frontswap.c')
-rw-r--r-- | mm/frontswap.c | 38 |
1 file changed, 18 insertions(+), 20 deletions(-)
```diff
diff --git a/mm/frontswap.c b/mm/frontswap.c
index cbd2b8af8129..e44c9cbd1443 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -24,7 +24,7 @@
  * frontswap_ops is set by frontswap_register_ops to contain the pointers
  * to the frontswap "backend" implementation functions.
  */
-static struct frontswap_ops frontswap_ops __read_mostly;
+static struct frontswap_ops *frontswap_ops __read_mostly;
 
 /*
  * This global enablement flag reduces overhead on systems where frontswap_ops
@@ -108,41 +108,39 @@ static inline void inc_frontswap_invalidates(void) { }
  *
  * The time between the backend being registered and the swap file system
  * calling the backend (via the frontswap_* functions) is indeterminate as
- * backend_registered is not atomic_t (or a value guarded by a spinlock).
+ * frontswap_ops is not atomic_t (or a value guarded by a spinlock).
  * That is OK as we are comfortable missing some of these calls to the newly
  * registered backend.
  *
  * Obviously the opposite (unloading the backend) must be done after all
  * the frontswap_[store|load|invalidate_area|invalidate_page] start
- * ignorning or failing the requests - at which point backend_registered
+ * ignorning or failing the requests - at which point frontswap_ops
  * would have to be made in some fashion atomic.
  */
 static DECLARE_BITMAP(need_init, MAX_SWAPFILES);
-static bool backend_registered __read_mostly;
 
 /*
  * Register operations for frontswap, returning previous thus allowing
  * detection of multiple backends and possible nesting.
  */
-struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
+struct frontswap_ops *frontswap_register_ops(struct frontswap_ops *ops)
 {
-	struct frontswap_ops old = frontswap_ops;
+	struct frontswap_ops *old = frontswap_ops;
 	int i;
 
-	frontswap_ops = *ops;
 	frontswap_enabled = true;
 	for (i = 0; i < MAX_SWAPFILES; i++) {
 		if (test_and_clear_bit(i, need_init))
-			(*frontswap_ops.init)(i);
+			ops->init(i);
 	}
 	/*
-	 * We MUST have backend_registered set _after_ the frontswap_init's
+	 * We MUST have frontswap_ops set _after_ the frontswap_init's
 	 * have been called. Otherwise __frontswap_store might fail. Hence
 	 * the barrier to make sure compiler does not re-order us.
 	 */
 	barrier();
-	backend_registered = true;
+	frontswap_ops = ops;
 	return old;
 }
 EXPORT_SYMBOL(frontswap_register_ops);
@@ -172,11 +170,11 @@ void __frontswap_init(unsigned type)
 {
 	struct swap_info_struct *sis = swap_info[type];
 
-	if (backend_registered) {
+	if (frontswap_ops) {
 		BUG_ON(sis == NULL);
 		if (sis->frontswap_map == NULL)
 			return;
-		(*frontswap_ops.init)(type);
+		frontswap_ops->init(type);
 	} else {
 		BUG_ON(type > MAX_SWAPFILES);
 		set_bit(type, need_init);
@@ -206,7 +204,7 @@ int __frontswap_store(struct page *page)
 	struct swap_info_struct *sis = swap_info[type];
 	pgoff_t offset = swp_offset(entry);
 
-	if (!backend_registered) {
+	if (!frontswap_ops) {
 		inc_frontswap_failed_stores();
 		return ret;
 	}
@@ -215,7 +213,7 @@ int __frontswap_store(struct page *page)
 	BUG_ON(sis == NULL);
 	if (frontswap_test(sis, offset))
 		dup = 1;
-	ret = frontswap_ops.store(type, offset, page);
+	ret = frontswap_ops->store(type, offset, page);
 	if (ret == 0) {
 		frontswap_set(sis, offset);
 		inc_frontswap_succ_stores();
@@ -250,13 +248,13 @@ int __frontswap_load(struct page *page)
 	struct swap_info_struct *sis = swap_info[type];
 	pgoff_t offset = swp_offset(entry);
 
-	if (!backend_registered)
+	if (!frontswap_ops)
 		return ret;
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(sis == NULL);
 	if (frontswap_test(sis, offset))
-		ret = frontswap_ops.load(type, offset, page);
+		ret = frontswap_ops->load(type, offset, page);
 	if (ret == 0) {
 		inc_frontswap_loads();
 		if (frontswap_tmem_exclusive_gets_enabled) {
@@ -276,12 +274,12 @@ void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
 {
 	struct swap_info_struct *sis = swap_info[type];
 
-	if (!backend_registered)
+	if (!frontswap_ops)
 		return;
 
 	BUG_ON(sis == NULL);
 	if (frontswap_test(sis, offset)) {
-		frontswap_ops.invalidate_page(type, offset);
+		frontswap_ops->invalidate_page(type, offset);
 		__frontswap_clear(sis, offset);
 		inc_frontswap_invalidates();
 	}
@@ -296,11 +294,11 @@ void __frontswap_invalidate_area(unsigned type)
 {
 	struct swap_info_struct *sis = swap_info[type];
 
-	if (backend_registered) {
+	if (frontswap_ops) {
 		BUG_ON(sis == NULL);
 		if (sis->frontswap_map == NULL)
 			return;
-		(*frontswap_ops.invalidate_area)(type);
+		frontswap_ops->invalidate_area(type);
 		atomic_set(&sis->frontswap_pages, 0);
 		memset(sis->frontswap_map, 0, sis->max / sizeof(long));
 	}
```
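With the ops table now handed over by pointer, a backend registers itself roughly as sketched below. The callback names and frontswap_register_ops() come from the diff above; the exact prototypes live in include/linux/frontswap.h (not part of this patch), and the my_backend_* functions and module-init wrapper are hypothetical placeholders.

```c
/* Hypothetical backend registration under the pointer-based interface.
 * Callback names are taken from the call sites in the diff; prototypes
 * are assumed to match frontswap.h, which is not shown here.
 */
static struct frontswap_ops my_backend_ops = {
	.init			= my_backend_init,		/* per swap device setup */
	.store			= my_backend_store,		/* returns 0 on success */
	.load			= my_backend_load,
	.invalidate_page	= my_backend_invalidate_page,
	.invalidate_area	= my_backend_invalidate_area,
};

static int __init my_backend_start(void)
{
	/* frontswap keeps this pointer; "frontswap_ops != NULL" is now the
	 * "a backend is registered" test, so no separate flag is needed.
	 */
	struct frontswap_ops *old = frontswap_register_ops(&my_backend_ops);

	if (old)
		pr_warn("my_backend: another frontswap backend was already registered\n");
	return 0;
}
module_init(my_backend_start);
```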