Diffstat (limited to 'include/linux/mm_types.h')
-rw-r--r--	include/linux/mm_types.h	135
1 file changed, 104 insertions, 31 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 027935c86c68..5b42f1b34eb7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -30,23 +30,86 @@ struct address_space;
  * moment. Note that we have no way to track which tasks are using
  * a page, though if it is a pagecache page, rmap structures can tell us
  * who is mapping it.
+ *
+ * The objects in struct page are organized in double word blocks in
+ * order to allow us to use atomic double word operations on portions
+ * of struct page. That is currently only used by slub but the arrangement
+ * allows the use of atomic double word operations on the flags/mapping
+ * and lru list pointers also.
  */
 struct page {
+	/* First double word block */
 	unsigned long flags;		/* Atomic flags, some possibly
 					 * updated asynchronously */
-	atomic_t _count;		/* Usage count, see below. */
+	struct address_space *mapping;	/* If low bit clear, points to
+					 * inode address_space, or NULL.
+					 * If page mapped as anonymous
+					 * memory, low bit is set, and
+					 * it points to anon_vma object:
+					 * see PAGE_MAPPING_ANON below.
+					 */
+	/* Second double word */
+	struct {
+		union {
+			pgoff_t index;		/* Our offset within mapping. */
+			void *freelist;		/* slub first free object */
+		};
+
+		union {
+			/* Used for cmpxchg_double in slub */
+			unsigned long counters;
+
+			struct {
+
+				union {
+					/*
+					 * Count of ptes mapped in
+					 * mms, to show when page is
+					 * mapped & limit reverse map
+					 * searches.
+					 *
+					 * Used also for tail pages
+					 * refcounting instead of
+					 * _count. Tail pages cannot
+					 * be mapped and keeping the
+					 * tail page _count zero at
+					 * all times guarantees
+					 * get_page_unless_zero() will
+					 * never succeed on tail
+					 * pages.
+					 */
+					atomic_t _mapcount;
+
+					struct {
+						unsigned inuse:16;
+						unsigned objects:15;
+						unsigned frozen:1;
+					};
+				};
+				atomic_t _count;	/* Usage count, see below. */
+			};
+		};
+	};
+
+	/* Third double word block */
 	union {
-		atomic_t _mapcount;	/* Count of ptes mapped in mms,
-					 * to show when page is mapped
-					 * & limit reverse map searches.
+		struct list_head lru;	/* Pageout list, eg. active_list
+					 * protected by zone->lru_lock !
 					 */
-		struct {		/* SLUB */
-			u16 inuse;
-			u16 objects;
+		struct {		/* slub per cpu partial pages */
+			struct page *next;	/* Next partial slab */
+#ifdef CONFIG_64BIT
+			int pages;	/* Nr of partial slabs left */
+			int pobjects;	/* Approximate # of objects */
+#else
+			short int pages;
+			short int pobjects;
+#endif
 		};
 	};
+
+	/* Remainder is not double word aligned */
 	union {
-	    struct {
 		unsigned long private;		/* Mapping-private opaque data:
					 	 * usually used for buffer_heads
						 * if PagePrivate set; used for
@@ -54,27 +117,13 @@ struct page {
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
-		struct address_space *mapping;	/* If low bit clear, points to
-						 * inode address_space, or NULL.
-						 * If page mapped as anonymous
-						 * memory, low bit is set, and
-						 * it points to anon_vma object:
-						 * see PAGE_MAPPING_ANON below.
-						 */
-	    };
 #if USE_SPLIT_PTLOCKS
-	    spinlock_t ptl;
+		spinlock_t ptl;
 #endif
-	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
-	    struct page *first_page;	/* Compound tail pages */
+		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct page *first_page;	/* Compound tail pages */
 	};
-	union {
-		pgoff_t index;		/* Our offset within mapping. */
-		void *freelist;		/* SLUB: freelist req. slab lock */
-	};
-	struct list_head lru;		/* Pageout list, eg. active_list
-					 * protected by zone->lru_lock !
-					 */
+
 	/*
 	 * On machines where all RAM is mapped into kernel address space,
 	 * we can simply calculate the virtual address. On machines with
@@ -100,6 +149,26 @@ struct page {
 	 */
 	void *shadow;
 #endif
+}
+/*
+ * If another subsystem starts using the double word pairing for atomic
+ * operations on struct page then it must change the #if to ensure
+ * proper alignment of the page struct.
+ */
+#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
+	__attribute__((__aligned__(2*sizeof(unsigned long))))
+#endif
+;
+
+struct page_frag {
+	struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 offset;
+	__u32 size;
+#else
+	__u16 offset;
+	__u16 size;
+#endif
 };
 
 typedef unsigned long __nocast vm_flags_t;
@@ -248,8 +317,15 @@ struct mm_struct {
 	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
 	unsigned long hiwater_vm;	/* High-water virtual memory usage */
 
-	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
-	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
+	unsigned long total_vm;		/* Total pages mapped */
+	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
+	unsigned long pinned_vm;	/* Refcount permanently increased */
+	unsigned long shared_vm;	/* Shared pages (files) */
+	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
+	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
+	unsigned long reserved_vm;	/* VM_RESERVED|VM_IO pages */
+	unsigned long def_flags;
+	unsigned long nr_ptes;		/* Page table pages */
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
 	unsigned long arg_start, arg_end, env_start, env_end;
@@ -280,9 +356,6 @@ struct mm_struct {
 	unsigned int token_priority;
 	unsigned int last_interval;
 
-	/* How many tasks sharing this mm are OOM_DISABLE */
-	atomic_t oom_disable_count;
-
 	unsigned long flags; /* Must use atomic bitops to access the bits */
 
 	struct core_state *core_state; /* coredumping support */
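Note on the layout: the reason index/freelist and counters are packed into one naturally aligned double word, and the reason the patch adds the 2*sizeof(unsigned long) alignment attribute under CONFIG_SLUB && CONFIG_CMPXCHG_LOCAL, is that slub updates both words with a single cmpxchg_double (cmpxchg16b on x86-64), which requires double-word alignment. Below is a minimal user-space sketch of that idea, not kernel code: the struct name slab_words, the addresses, and the field values are invented for illustration, and it assumes GCC or clang __atomic builtins on x86-64 (build with something like cc -mcx16 -fno-strict-aliasing, possibly adding -latomic).

/*
 * Hypothetical user-space sketch, NOT kernel code: mimics how slub can
 * move the freelist pointer and the counters word from one consistent
 * state to another with a single double-word compare-and-exchange, so
 * both change atomically or not at all.
 */
#include <stdio.h>
#include <string.h>

struct slab_words {
	void *freelist;			/* word 1: first free object */
	union {
		unsigned long counters;	/* word 2, as one value for CAS */
		struct {		/* same storage, viewed as fields */
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
} __attribute__((__aligned__(2 * sizeof(unsigned long))));

typedef unsigned __int128 dword;	/* both words as one 16-byte value */

static struct slab_words slab = { .freelist = (void *)0x2000 };

int main(void)
{
	struct slab_words old_w = slab, new_w = slab;
	dword expected, desired;

	/* Pretend we popped one object off the freelist. */
	new_w.freelist = (void *)0x2040;
	new_w.inuse = old_w.inuse + 1;

	memcpy(&expected, &old_w, sizeof(expected));
	memcpy(&desired, &new_w, sizeof(desired));

	/* Double-word CAS over freelist + counters, like cmpxchg_double. */
	int ok = __atomic_compare_exchange_n((dword *)&slab, &expected,
					     desired, 0, __ATOMIC_SEQ_CST,
					     __ATOMIC_SEQ_CST);

	printf("cas %s: freelist=%p inuse=%u\n",
	       ok ? "won" : "lost", slab.freelist, (unsigned)slab.inuse);
	return 0;
}

The bitfield view of counters mirrors the inuse/objects/frozen packing in the patch: on a little-endian LP64 target the three fields occupy low bits of the same word the exchange covers, so a successful CAS updates the freelist pointer and all three fields together, with no window where another CPU can observe a freelist that disagrees with inuse.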