author    | Joonsoo Kim <iamjoonsoo.kim@lge.com> | 2013-10-24 10:07:42 +0900
committer | Pekka Enberg <penberg@iki.fi>        | 2013-10-24 20:17:31 +0300
commit    | 68126702b419fd26ef4946e314bb3a1f57d3a53f (patch)
tree      | af7acaf8d13921ab34271cb500d8454940a12e86 /include
parent    | 07d417a1c6f1e386a2276b0cae8ae1d14b8a32cc (diff)
slab: overloading the RCU head over the LRU for RCU free
With build-time size checking, we can overload the RCU head over the LRU
of struct page to free pages of a slab in RCU context. This really helps with
overlaying struct slab on struct page, and it eventually reduces the memory
usage and cache footprint of SLAB.
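The overlay only works because struct rcu_head is no larger than the list_head it shares storage with, and that assumption is enforced at build time. Below is a minimal standalone C sketch of the idea, not the kernel code itself; fake_list_head, fake_rcu_head and fake_page are illustrative stand-ins for the real kernel types.

```c
#include <stdio.h>

struct fake_list_head {                 /* stand-in for struct list_head */
	struct fake_list_head *next, *prev;
};

struct fake_rcu_head {                  /* stand-in for struct rcu_head */
	struct fake_rcu_head *next;
	void (*func)(struct fake_rcu_head *head);
};

struct fake_page {                      /* stand-in for struct page */
	union {
		struct fake_list_head lru;      /* normal LRU linkage */
		struct fake_rcu_head rcu_head;  /* reused while the slab page
						 * waits for an RCU grace period
						 */
	};
};

/* Build-time size check: refuse to compile if rcu_head would spill past
 * the space already occupied by the LRU list head. */
_Static_assert(sizeof(struct fake_rcu_head) <= sizeof(struct fake_list_head),
	       "rcu_head must fit in the lru field");

int main(void)
{
	printf("page: %zu bytes, lru: %zu, rcu_head: %zu\n",
	       sizeof(struct fake_page),
	       sizeof(struct fake_list_head),
	       sizeof(struct fake_rcu_head));
	return 0;
}
```

The _Static_assert plays the role of the kernel's build-time check: if rcu_head ever grew beyond two pointers, the overlay would corrupt neighbouring fields, so the build fails instead.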
Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/mm_types.h | 3
-rw-r--r-- | include/linux/slab.h | 9
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index faf4b7c1ad12..959cb369b197 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -130,6 +130,9 @@ struct page {
 
 		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
+		struct rcu_head rcu_head;	/* Used by SLAB
+						 * when destroying via RCU
+						 */
 	};
 
 	/* Remainder is not double word aligned */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..caaad51fee1f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -51,7 +51,14 @@
 *  }
 * rcu_read_unlock();
 *
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not been meanwhile reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
 */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD	0x00100000UL	/* Spread some memory over cpuset */
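The comment added to slab.h describes the validation pattern a user of a SLAB_DESTROY_BY_RCU cache must follow: the RCU read-side section only keeps the memory from being returned to the page allocator, so after a lockless lookup the caller takes the object's own lock and re-checks identity. A hedged kernel-style sketch of that pattern follows; struct my_obj, my_cache, my_obj_lookup() and lockless_lookup() are hypothetical names, and only the slab/RCU/spinlock calls are real kernel APIs of this era.

```c
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct my_obj {
	spinlock_t	lock;
	unsigned long	key;
	bool		alive;		/* cleared before kmem_cache_free() */
};

static struct kmem_cache *my_cache;

/* Hypothetical lockless data-structure walk, e.g. an RCU hash lookup. */
static struct my_obj *lockless_lookup(unsigned long key);

static int __init my_cache_init(void)
{
	/* Objects freed from this cache may be reused immediately, but the
	 * backing slab pages are only freed after an RCU grace period. */
	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
				     SLAB_DESTROY_BY_RCU, NULL);
	return my_cache ? 0 : -ENOMEM;
}

static struct my_obj *my_obj_lookup(unsigned long key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = lockless_lookup(key);
	if (obj) {
		/* The memory is guaranteed to still be a my_obj, but it may
		 * have been freed and reused for a different one.  Lock it
		 * to stabilize it, then check it is still what we wanted. */
		spin_lock(&obj->lock);
		if (!obj->alive || obj->key != key) {
			spin_unlock(&obj->lock);
			obj = NULL;	/* reused; caller retries the lookup */
		}
	}
	rcu_read_unlock();

	return obj;			/* returned locked if non-NULL */
}
```

The key point the comment makes is the ordering: rcu_read_lock() is taken before the address is read, and rcu_read_unlock() only after the spinlock inside the expected structure has been acquired and the identity re-checked.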