author     Rafael J. Wysocki <rjw@sisk.pl>    2009-07-08 13:24:12 +0200
committer  Rafael J. Wysocki <rjw@sisk.pl>    2009-09-14 20:26:59 +0200
commit     ef4aede3f10d82adef1fb044b565ba5f08f851e0 (patch)
tree       58566c694aa036e75df401979bd8a85ffa718f18
parent     64a473cb74a88cb4991edf985d55a266e65292e1 (diff)
download   linux-stable-ef4aede3f10d82adef1fb044b565ba5f08f851e0.tar.gz
           linux-stable-ef4aede3f10d82adef1fb044b565ba5f08f851e0.tar.bz2
           linux-stable-ef4aede3f10d82adef1fb044b565ba5f08f851e0.zip
PM/Hibernate: Do not try to allocate too much memory too hard (rev. 2)
We want to avoid attempting to free too much memory too hard during hibernation, so estimate the minimum size of the image to use as the lower limit for preallocating memory.

The approach here is based on the (experimental) observation that we can't free more page frames than the sum of:

* global_page_state(NR_SLAB_RECLAIMABLE)
* global_page_state(NR_ACTIVE_ANON)
* global_page_state(NR_INACTIVE_ANON)
* global_page_state(NR_ACTIVE_FILE)
* global_page_state(NR_INACTIVE_FILE)

minus

* global_page_state(NR_FILE_MAPPED)

Namely, if this number is subtracted from the number of saveable pages in the system, we get a good estimate of the minimum reasonable size of a hibernation image.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Wu Fengguang <fengguang.wu@intel.com>
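For illustration only, here is a small user-space sketch of the arithmetic described above. The program and every number in it are hypothetical: the fixed counts merely stand in for the kernel's global_page_state() counters (a real run could approximate them from /proc/vmstat), and the sketch simply mirrors the subtraction performed by minimum_image_size() in the patch below.

/*
 * Illustration only, not part of the patch: the same arithmetic as
 * minimum_image_size(), computed in user space with made-up page counts
 * standing in for the kernel's global_page_state() counters.
 */
#include <stdio.h>

int main(void)
{
	unsigned long saveable      = 120000; /* hypothetical saveable pages */
	unsigned long slab_reclaim  =   8000; /* NR_SLAB_RECLAIMABLE */
	unsigned long active_anon   =  30000; /* NR_ACTIVE_ANON */
	unsigned long inactive_anon =  10000; /* NR_INACTIVE_ANON */
	unsigned long active_file   =  25000; /* NR_ACTIVE_FILE */
	unsigned long inactive_file =  20000; /* NR_INACTIVE_FILE */
	unsigned long file_mapped   =  15000; /* NR_FILE_MAPPED */

	/* Pages we can expect to free, per the observation above. */
	unsigned long freeable = slab_reclaim + active_anon + inactive_anon
				 + active_file + inactive_file - file_mapped;

	/* Minimum reasonable image size = saveable - freeable (never negative). */
	unsigned long minimum = saveable <= freeable ? 0 : saveable - freeable;

	printf("estimated minimum image size: %lu pages\n", minimum);
	return 0;
}

With these made-up values the freeable sum is 78000 pages, so the estimated minimum image size is 120000 - 78000 = 42000 pages; that figure plays the role of the lower bound used when preallocating memory for the image.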
-rw-r--r--    kernel/power/snapshot.c    43
1 file changed, 39 insertions, 4 deletions
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 2b1a7bc24c91..0a06b114dd33 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1204,6 +1204,36 @@ static void free_unnecessary_pages(void)
}
/**
+ * minimum_image_size - Estimate the minimum acceptable size of an image
+ * @saveable: Number of saveable pages in the system.
+ *
+ * We want to avoid attempting to free too much memory too hard, so estimate the
+ * minimum acceptable size of a hibernation image to use as the lower limit for
+ * preallocating memory.
+ *
+ * We assume that the minimum image size should be proportional to
+ *
+ * [number of saveable pages] - [number of pages that can be freed in theory]
+ *
+ * where the second term is the sum of (1) reclaimable slab pages, (2) active
+ * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
+ * minus mapped file pages.
+ */
+static unsigned long minimum_image_size(unsigned long saveable)
+{
+	unsigned long size;
+
+	size = global_page_state(NR_SLAB_RECLAIMABLE)
+		+ global_page_state(NR_ACTIVE_ANON)
+		+ global_page_state(NR_INACTIVE_ANON)
+		+ global_page_state(NR_ACTIVE_FILE)
+		+ global_page_state(NR_INACTIVE_FILE)
+		- global_page_state(NR_FILE_MAPPED);
+
+	return saveable <= size ? 0 : saveable - size;
+}
+
+/**
* hibernate_preallocate_memory - Preallocate memory for hibernation image
*
* To create a hibernation image it is necessary to make a copy of every page
@@ -1220,8 +1250,8 @@ static void free_unnecessary_pages(void)
*
* If image_size is set below the number following from the above formula,
* the preallocation of memory is continued until the total number of saveable
- * pages in the system is below the requested image size or it is impossible to
- * allocate more memory, whichever happens first.
+ * pages in the system is below the requested image size or the minimum
+ * acceptable image size returned by minimum_image_size(), whichever is greater.
*/
int hibernate_preallocate_memory(void)
{
@@ -1282,6 +1312,11 @@ int hibernate_preallocate_memory(void)
goto out;
}
+	/* Estimate the minimum size of the image. */
+	pages = minimum_image_size(saveable);
+	if (size < pages)
+		size = min_t(unsigned long, pages, max_size);
+
/*
* Let the memory management subsystem know that we're going to need a
* large number of page frames to allocate and make it free some memory.
@@ -1294,8 +1329,8 @@ int hibernate_preallocate_memory(void)
* The number of saveable pages in memory was too high, so apply some
* pressure to decrease it. First, make room for the largest possible
* image and fail if that doesn't work. Next, try to decrease the size
- * of the image as much as indicated by image_size using allocations
- * from highmem and non-highmem zones separately.
+ * of the image as much as indicated by 'size' using allocations from
+ * highmem and non-highmem zones separately.
*/
pages_highmem = preallocate_image_highmem(highmem / 2);
alloc = (count - max_size) - pages_highmem;