Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmscan.c | 60
1 files changed, 40 insertions, 20 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cff0d491c6d..4d4e859b4b9c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -205,19 +205,24 @@ static inline int do_shrinker_shrink(struct shrinker *shrinker,
  *
  * Returns the number of slab objects which we shrunk.
  */
-unsigned long shrink_slab(struct shrink_control *shrink,
+unsigned long shrink_slab(struct shrink_control *shrinkctl,
 			  unsigned long nr_pages_scanned,
 			  unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
-	unsigned long ret = 0;
+	unsigned long freed = 0;
 
 	if (nr_pages_scanned == 0)
 		nr_pages_scanned = SWAP_CLUSTER_MAX;
 
 	if (!down_read_trylock(&shrinker_rwsem)) {
-		/* Assume we'll be able to shrink next time */
-		ret = 1;
+		/*
+		 * If we would return 0, our callers would understand that we
+		 * have nothing else to shrink and give up trying. By returning
+		 * 1 we keep it going and assume we'll be able to shrink next
+		 * time.
+		 */
+		freed = 1;
 		goto out;
 	}
 
@@ -225,14 +230,16 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		unsigned long long delta;
 		long total_scan;
 		long max_pass;
-		int shrink_ret = 0;
 		long nr;
 		long new_nr;
 		long batch_size = shrinker->batch ? shrinker->batch
 						  : SHRINK_BATCH;
 
-		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
-		if (max_pass <= 0)
+		if (shrinker->count_objects)
+			max_pass = shrinker->count_objects(shrinker, shrinkctl);
+		else
+			max_pass = do_shrinker_shrink(shrinker, shrinkctl, 0);
+		if (max_pass == 0)
 			continue;
 
 		/*
@@ -248,8 +255,8 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		do_div(delta, lru_pages + 1);
 		total_scan += delta;
 		if (total_scan < 0) {
-			printk(KERN_ERR "shrink_slab: %pF negative objects to "
-			       "delete nr=%ld\n",
+			printk(KERN_ERR
+			"shrink_slab: %pF negative objects to delete nr=%ld\n",
 			       shrinker->shrink, total_scan);
 			total_scan = max_pass;
 		}
@@ -277,20 +284,33 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		if (total_scan > max_pass * 2)
 			total_scan = max_pass * 2;
 
-		trace_mm_shrink_slab_start(shrinker, shrink, nr,
+		trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
 					nr_pages_scanned, lru_pages,
 					max_pass, delta, total_scan);
 
 		while (total_scan >= batch_size) {
-			int nr_before;
 
-			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
-			shrink_ret = do_shrinker_shrink(shrinker, shrink,
-							batch_size);
-			if (shrink_ret == -1)
-				break;
-			if (shrink_ret < nr_before)
-				ret += nr_before - shrink_ret;
+			if (shrinker->scan_objects) {
+				unsigned long ret;
+				shrinkctl->nr_to_scan = batch_size;
+				ret = shrinker->scan_objects(shrinker, shrinkctl);
+
+				if (ret == SHRINK_STOP)
+					break;
+				freed += ret;
+			} else {
+				int nr_before;
+				long ret;
+
+				nr_before = do_shrinker_shrink(shrinker, shrinkctl, 0);
+				ret = do_shrinker_shrink(shrinker, shrinkctl,
+							 batch_size);
+				if (ret == -1)
+					break;
+				if (ret < nr_before)
+					freed += nr_before - ret;
+			}
+
 			count_vm_events(SLABS_SCANNED, batch_size);
 			total_scan -= batch_size;
 
@@ -308,12 +328,12 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		else
 			new_nr = atomic_long_read(&shrinker->nr_in_batch);
 
-		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
+		trace_mm_shrink_slab_end(shrinker, freed, nr, new_nr);
 	}
 	up_read(&shrinker_rwsem);
 out:
 	cond_resched();
-	return ret;
+	return freed;
 }
 
 static inline int is_page_cache_freeable(struct page *page)
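
For reference, a shrinker written against the count_objects/scan_objects interface that this patch teaches shrink_slab() to prefer might look like the sketch below. It is illustrative only and not part of this commit: the demo_* names and the toy object counter are made up, while struct shrinker, struct shrink_control, SHRINK_STOP, DEFAULT_SEEKS and register_shrinker()/unregister_shrinker() are the kernel interfaces this series provides.

/* Minimal sketch of a shrinker using the new count/scan callbacks. */
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>

/* Toy "cache": just a counter of reclaimable objects (illustrative). */
static atomic_long_t demo_nr_objects = ATOMIC_LONG_INIT(0);

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	/* Cheap, lock-free estimate; returning 0 means nothing to scan. */
	return atomic_long_read(&demo_nr_objects);
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	long nr = min_t(long, sc->nr_to_scan,
			atomic_long_read(&demo_nr_objects));

	if (nr <= 0)
		return SHRINK_STOP;	/* no progress possible, stop the loop */

	/* A real cache would free objects here, honouring sc->gfp_mask. */
	atomic_long_sub(nr, &demo_nr_objects);
	return nr;			/* objects actually freed this batch */
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) at init, unregister_shrinker() at exit. */

With both callbacks set, the loop above calls count_objects() once to size the scan and then scan_objects() in batch_size chunks, falling back to the legacy ->shrink callback only when they are absent.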