author    Linus Torvalds <torvalds@linux-foundation.org>  2022-10-10 17:53:04 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-10-10 17:53:04 -0700
commit    27bc50fc90647bbf7b734c3fc306a5e61350da53
tree      75fc525fbfec8c07a97a7875a89592317bcad4ca /mm/damon
parent    70442fc54e6889a2a77f0e9554e8188a1557f00e
parent    bbff39cc6cbcb86ccfacb2dcafc79912a9f9df69
Merge tag 'mm-stable-2022-10-08' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:

 - Yu Zhao's Multi-Gen LRU patches are here. They've been under test in
   linux-next for a couple of months without, to my knowledge, any negative
   reports (or any positive ones, come to that).

 - Also the Maple Tree from Liam Howlett. An overlapping range-based tree for
   vmas. It is apparently slightly more efficient in its own right, but is
   mainly targeted at enabling work to reduce mmap_lock contention.

   Liam has identified a number of other tree users in the kernel which could
   be beneficially converted to maple trees.

   Yu Zhao has identified a hard-to-hit but "easy to fix" lockdep splat at
   [1]. This has yet to be addressed due to Liam's unfortunately timed
   vacation. He is now back and we'll get this fixed up.

 - Dmitry Vyukov introduces KMSAN: the Kernel Memory Sanitizer. It uses
   clang-generated instrumentation to detect use-of-uninitialized bugs down
   to the single-bit level. KMSAN keeps finding bugs. New ones, as well as
   the legacy ones.

 - Yang Shi adds a userspace mechanism (madvise) to induce a collapse of
   memory into THPs.

 - Zach O'Keefe has expanded Yang Shi's madvise(MADV_COLLAPSE) to support
   file/shmem-backed pages.

 - userfaultfd updates from Axel Rasmussen

 - zsmalloc cleanups from Alexey Romanov

 - cleanups from Miaohe Lin: vmscan, hugetlb_cgroup, hugetlb and
   memory-failure

 - Huang Ying adds enhancements to NUMA balancing memory tiering mode's page
   promotion, with a new way of detecting hot pages.

 - memcg updates from Shakeel Butt: charging optimizations and reduced memory
   consumption.

 - memcg cleanups from Kairui Song.

 - memcg fixes and cleanups from Johannes Weiner.

 - Vishal Moola provides more folio conversions

 - Zhang Yi removed ll_rw_block() :(

 - migration enhancements from Peter Xu

 - migration error-path bugfixes from Huang Ying

 - Aneesh Kumar added the ability for a device driver to alter the memory
   tiering promotion paths. For optimizations by PMEM drivers, DRM drivers,
   etc.

 - vma merging improvements from Jakub Matěna.

 - NUMA hinting cleanups from David Hildenbrand.

 - Xu Xin added additional userspace visibility into KSM merging activity.

 - THP & KSM code consolidation from Qi Zheng.

 - more folio work from Matthew Wilcox.

 - KASAN updates from Andrey Konovalov.

 - DAMON cleanups from Kaixu Xia.

 - DAMON work from SeongJae Park: fixes and cleanups.

 - hugetlb sysfs cleanups from Muchun Song.

 - Mike Kravetz fixes locking issues in hugetlbfs and in hugetlb core.
Link: https://lkml.kernel.org/r/CAOUHufZabH85CeUN-MEMgL8gJGzJEWUrkiM58JkTbBhh-jew0Q@mail.gmail.com [1]

* tag 'mm-stable-2022-10-08' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (555 commits)
  hugetlb: allocate vma lock for all sharable vmas
  hugetlb: take hugetlb vma_lock when clearing vma_lock->vma pointer
  hugetlb: fix vma lock handling during split vma and range unmapping
  mglru: mm/vmscan.c: fix imprecise comments
  mm/mglru: don't sync disk for each aging cycle
  mm: memcontrol: drop dead CONFIG_MEMCG_SWAP config symbol
  mm: memcontrol: use do_memsw_account() in a few more places
  mm: memcontrol: deprecate swapaccounting=0 mode
  mm: memcontrol: don't allocate cgroup swap arrays when memcg is disabled
  mm/secretmem: remove reduntant return value
  mm/hugetlb: add available_huge_pages() func
  mm: remove unused inline functions from include/linux/mm_inline.h
  selftests/vm: add selftest for MADV_COLLAPSE of uffd-minor memory
  selftests/vm: add file/shmem MADV_COLLAPSE selftest for cleared pmd
  selftests/vm: add thp collapse shmem testing
  selftests/vm: add thp collapse file and tmpfs testing
  selftests/vm: modularize thp collapse memory operations
  selftests/vm: dedup THP helpers
  mm/khugepaged: add tracepoint to hpage_collapse_scan_file()
  mm/madvise: add file and shmem support to MADV_COLLAPSE
  ...
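Since madvise(MADV_COLLAPSE) is the main new userspace-facing API in this
pull, a minimal usage sketch follows. This is illustrative only and not part
of the merge; the fallback #define mirrors the uapi value, but verify it
against your installed headers.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>

	#ifndef MADV_COLLAPSE
	#define MADV_COLLAPSE 25	/* uapi value, for libcs that predate it */
	#endif

	int main(void)
	{
		size_t len = 4 * (2UL << 20);		/* four 2 MiB chunks */
		void *buf = aligned_alloc(2UL << 20, len);	/* THP-aligned */

		if (!buf)
			return 1;
		memset(buf, 1, len);	/* populate the range first */
		/* Ask the kernel to collapse the range into THPs, if possible. */
		if (madvise(buf, len, MADV_COLLAPSE))
			perror("madvise(MADV_COLLAPSE)");
		free(buf);
		return 0;
	}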
Diffstat (limited to 'mm/damon')
-rw-r--r--  mm/damon/Kconfig             3
-rw-r--r--  mm/damon/core-test.h        29
-rw-r--r--  mm/damon/core.c            281
-rw-r--r--  mm/damon/dbgfs.c            52
-rw-r--r--  mm/damon/lru_sort.c        380
-rw-r--r--  mm/damon/modules-common.h   46
-rw-r--r--  mm/damon/ops-common.c       50
-rw-r--r--  mm/damon/ops-common.h        2
-rw-r--r--  mm/damon/paddr.c            43
-rw-r--r--  mm/damon/reclaim.c         289
-rw-r--r--  mm/damon/sysfs.c           143
-rw-r--r--  mm/damon/vaddr-test.h       36
-rw-r--r--  mm/damon/vaddr.c           102
13 files changed, 596 insertions, 860 deletions
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index 66265e3a9c65..7821fcb3f258 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -68,6 +68,9 @@ config DAMON_DBGFS
If unsure, say N.
+ This will be removed after >5.15.y LTS kernel is released, so users
+ should move to the sysfs interface (DAMON_SYSFS).
+
config DAMON_DBGFS_KUNIT_TEST
bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS
depends on DAMON_DBGFS && KUNIT=y
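For users of the deprecated debugfs interface, the migration amounts to a
config switch. An illustrative .config fragment (an assumption, not part of
this patch):

	CONFIG_DAMON=y
	CONFIG_DAMON_SYSFS=y
	# CONFIG_DAMON_DBGFS is not set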
diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h
index 573669566f84..3db9b7368756 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/core-test.h
@@ -126,7 +126,7 @@ static void damon_test_split_at(struct kunit *test)
t = damon_new_target();
r = damon_new_region(0, 100);
damon_add_region(r, t);
- damon_split_region_at(c, t, r, 25);
+ damon_split_region_at(t, r, 25);
KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);
@@ -219,14 +219,14 @@ static void damon_test_split_regions_of(struct kunit *test)
t = damon_new_target();
r = damon_new_region(0, 22);
damon_add_region(r, t);
- damon_split_regions_of(c, t, 2);
+ damon_split_regions_of(t, 2);
KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
damon_free_target(t);
t = damon_new_target();
r = damon_new_region(0, 220);
damon_add_region(r, t);
- damon_split_regions_of(c, t, 4);
+ damon_split_regions_of(t, 4);
KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
damon_free_target(t);
damon_destroy_ctx(c);
@@ -267,6 +267,28 @@ static void damon_test_ops_registration(struct kunit *test)
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
}
+static void damon_test_set_regions(struct kunit *test)
+{
+ struct damon_target *t = damon_new_target();
+ struct damon_region *r1 = damon_new_region(4, 16);
+ struct damon_region *r2 = damon_new_region(24, 32);
+ struct damon_addr_range range = {.start = 8, .end = 28};
+ unsigned long expects[] = {8, 16, 16, 24, 24, 28};
+ int expect_idx = 0;
+ struct damon_region *r;
+
+ damon_add_region(r1, t);
+ damon_add_region(r2, t);
+ damon_set_regions(t, &range, 1);
+
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
+ damon_for_each_region(r, t) {
+ KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
+ KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
+ }
+ damon_destroy_target(t);
+}
+
static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_target),
KUNIT_CASE(damon_test_regions),
@@ -276,6 +298,7 @@ static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_merge_regions_of),
KUNIT_CASE(damon_test_split_regions_of),
KUNIT_CASE(damon_test_ops_registration),
+ KUNIT_CASE(damon_test_set_regions),
{},
};
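To make the new test's expectations concrete: the target starts with regions
[4, 16) and [24, 32), and damon_set_regions() is asked for the single range
[8, 28). The first region is trimmed to [8, 16), the last is clipped to
[24, 28), and the hole between them is filled with a new region [16, 24),
which is exactly the three (start, end) pairs encoded in the expects[] array.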
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 7d25dc582fe3..4de8c7c52979 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -29,6 +29,8 @@ static bool running_exclusive_ctxs;
static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
+static struct kmem_cache *damon_region_cache __ro_after_init;
+
/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
@@ -119,7 +121,7 @@ struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
struct damon_region *region;
- region = kmalloc(sizeof(*region), GFP_KERNEL);
+ region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
if (!region)
return NULL;
@@ -148,7 +150,7 @@ static void damon_del_region(struct damon_region *r, struct damon_target *t)
static void damon_free_region(struct damon_region *r)
{
- kfree(r);
+ kmem_cache_free(damon_region_cache, r);
}
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
@@ -169,6 +171,30 @@ static bool damon_intersect(struct damon_region *r,
}
/*
+ * Fill holes in regions with new regions.
+ */
+static int damon_fill_regions_holes(struct damon_region *first,
+ struct damon_region *last, struct damon_target *t)
+{
+ struct damon_region *r = first;
+
+ damon_for_each_region_from(r, t) {
+ struct damon_region *next, *newr;
+
+ if (r == last)
+ break;
+ next = damon_next_region(r);
+ if (r->ar.end != next->ar.start) {
+ newr = damon_new_region(r->ar.end, next->ar.start);
+ if (!newr)
+ return -ENOMEM;
+ damon_insert_region(newr, r, next, t);
+ }
+ }
+ return 0;
+}
+
+/*
* damon_set_regions() - Set regions of a target for given address ranges.
* @t: the given target.
* @ranges: array of new monitoring target ranges.
@@ -184,6 +210,7 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
{
struct damon_region *r, *next;
unsigned int i;
+ int err;
/* Remove regions which are not in the new ranges */
damon_for_each_region_safe(r, next, t) {
@@ -195,6 +222,7 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
damon_destroy_region(r, t);
}
+ r = damon_first_region(t);
/* Add new regions or resize existing regions to fit in the ranges */
for (i = 0; i < nr_ranges; i++) {
struct damon_region *first = NULL, *last, *newr;
@@ -202,7 +230,7 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
range = &ranges[i];
/* Get the first/last regions intersecting with the range */
- damon_for_each_region(r, t) {
+ damon_for_each_region_from(r, t) {
if (damon_intersect(r, range)) {
if (!first)
first = r;
@@ -225,52 +253,46 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
first->ar.start = ALIGN_DOWN(range->start,
DAMON_MIN_REGION);
last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
+
+ /* fill possible holes in the range */
+ err = damon_fill_regions_holes(first, last, t);
+ if (err)
+ return err;
}
}
return 0;
}
-struct damos *damon_new_scheme(
- unsigned long min_sz_region, unsigned long max_sz_region,
- unsigned int min_nr_accesses, unsigned int max_nr_accesses,
- unsigned int min_age_region, unsigned int max_age_region,
- enum damos_action action, struct damos_quota *quota,
- struct damos_watermarks *wmarks)
+/* initialize private fields of damos_quota and return the pointer */
+static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
+{
+ quota->total_charged_sz = 0;
+ quota->total_charged_ns = 0;
+ quota->esz = 0;
+ quota->charged_sz = 0;
+ quota->charged_from = 0;
+ quota->charge_target_from = NULL;
+ quota->charge_addr_from = 0;
+ return quota;
+}
+
+struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+ enum damos_action action, struct damos_quota *quota,
+ struct damos_watermarks *wmarks)
{
struct damos *scheme;
scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
if (!scheme)
return NULL;
- scheme->min_sz_region = min_sz_region;
- scheme->max_sz_region = max_sz_region;
- scheme->min_nr_accesses = min_nr_accesses;
- scheme->max_nr_accesses = max_nr_accesses;
- scheme->min_age_region = min_age_region;
- scheme->max_age_region = max_age_region;
+ scheme->pattern = *pattern;
scheme->action = action;
scheme->stat = (struct damos_stat){};
INIT_LIST_HEAD(&scheme->list);
- scheme->quota.ms = quota->ms;
- scheme->quota.sz = quota->sz;
- scheme->quota.reset_interval = quota->reset_interval;
- scheme->quota.weight_sz = quota->weight_sz;
- scheme->quota.weight_nr_accesses = quota->weight_nr_accesses;
- scheme->quota.weight_age = quota->weight_age;
- scheme->quota.total_charged_sz = 0;
- scheme->quota.total_charged_ns = 0;
- scheme->quota.esz = 0;
- scheme->quota.charged_sz = 0;
- scheme->quota.charged_from = 0;
- scheme->quota.charge_target_from = NULL;
- scheme->quota.charge_addr_from = 0;
-
- scheme->wmarks.metric = wmarks->metric;
- scheme->wmarks.interval = wmarks->interval;
- scheme->wmarks.high = wmarks->high;
- scheme->wmarks.mid = wmarks->mid;
- scheme->wmarks.low = wmarks->low;
+ scheme->quota = *(damos_quota_init_priv(quota));
+
+ scheme->wmarks = *wmarks;
scheme->wmarks.activated = true;
return scheme;
@@ -360,17 +382,17 @@ struct damon_ctx *damon_new_ctx(void)
if (!ctx)
return NULL;
- ctx->sample_interval = 5 * 1000;
- ctx->aggr_interval = 100 * 1000;
- ctx->ops_update_interval = 60 * 1000 * 1000;
+ ctx->attrs.sample_interval = 5 * 1000;
+ ctx->attrs.aggr_interval = 100 * 1000;
+ ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
ktime_get_coarse_ts64(&ctx->last_aggregation);
ctx->last_ops_update = ctx->last_aggregation;
mutex_init(&ctx->kdamond_lock);
- ctx->min_nr_regions = 10;
- ctx->max_nr_regions = 1000;
+ ctx->attrs.min_nr_regions = 10;
+ ctx->attrs.max_nr_regions = 1000;
INIT_LIST_HEAD(&ctx->adaptive_targets);
INIT_LIST_HEAD(&ctx->schemes);
@@ -406,32 +428,21 @@ void damon_destroy_ctx(struct damon_ctx *ctx)
/**
* damon_set_attrs() - Set attributes for the monitoring.
* @ctx: monitoring context
- * @sample_int: time interval between samplings
- * @aggr_int: time interval between aggregations
- * @ops_upd_int: time interval between monitoring operations updates
- * @min_nr_reg: minimal number of regions
- * @max_nr_reg: maximum number of regions
+ * @attrs: monitoring attributes
*
* This function should not be called while the kdamond is running.
* Every time interval is in micro-seconds.
*
* Return: 0 on success, negative error code otherwise.
*/
-int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
- unsigned long aggr_int, unsigned long ops_upd_int,
- unsigned long min_nr_reg, unsigned long max_nr_reg)
+int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
- if (min_nr_reg < 3)
+ if (attrs->min_nr_regions < 3)
return -EINVAL;
- if (min_nr_reg > max_nr_reg)
+ if (attrs->min_nr_regions > attrs->max_nr_regions)
return -EINVAL;
- ctx->sample_interval = sample_int;
- ctx->aggr_interval = aggr_int;
- ctx->ops_update_interval = ops_upd_int;
- ctx->min_nr_regions = min_nr_reg;
- ctx->max_nr_regions = max_nr_reg;
-
+ ctx->attrs = *attrs;
return 0;
}
@@ -443,10 +454,8 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
*
* This function should not be called while the kdamond of the context is
* running.
- *
- * Return: 0 if success, or negative error code otherwise.
*/
-int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
+void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
ssize_t nr_schemes)
{
struct damos *s, *next;
@@ -456,7 +465,6 @@ int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
damon_destroy_scheme(s);
for (i = 0; i < nr_schemes; i++)
damon_add_scheme(ctx, schemes[i]);
- return 0;
}
/**
@@ -485,8 +493,8 @@ static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
sz += r->ar.end - r->ar.start;
}
- if (ctx->min_nr_regions)
- sz /= ctx->min_nr_regions;
+ if (ctx->attrs.min_nr_regions)
+ sz /= ctx->attrs.min_nr_regions;
if (sz < DAMON_MIN_REGION)
sz = DAMON_MIN_REGION;
@@ -635,7 +643,7 @@ static bool damon_check_reset_time_interval(struct timespec64 *baseline,
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
return damon_check_reset_time_interval(&ctx->last_aggregation,
- ctx->aggr_interval);
+ ctx->attrs.aggr_interval);
}
/*
@@ -658,19 +666,20 @@ static void kdamond_reset_aggregated(struct damon_ctx *c)
}
}
-static void damon_split_region_at(struct damon_ctx *ctx,
- struct damon_target *t, struct damon_region *r,
- unsigned long sz_r);
+static void damon_split_region_at(struct damon_target *t,
+ struct damon_region *r, unsigned long sz_r);
static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
unsigned long sz;
sz = r->ar.end - r->ar.start;
- return s->min_sz_region <= sz && sz <= s->max_sz_region &&
- s->min_nr_accesses <= r->nr_accesses &&
- r->nr_accesses <= s->max_nr_accesses &&
- s->min_age_region <= r->age && r->age <= s->max_age_region;
+ return s->pattern.min_sz_region <= sz &&
+ sz <= s->pattern.max_sz_region &&
+ s->pattern.min_nr_accesses <= r->nr_accesses &&
+ r->nr_accesses <= s->pattern.max_nr_accesses &&
+ s->pattern.min_age_region <= r->age &&
+ r->age <= s->pattern.max_age_region;
}
static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
@@ -726,7 +735,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
continue;
sz = DAMON_MIN_REGION;
}
- damon_split_region_at(c, t, r, sz);
+ damon_split_region_at(t, r, sz);
r = damon_next_region(r);
sz = r->ar.end - r->ar.start;
}
@@ -745,7 +754,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
DAMON_MIN_REGION);
if (!sz)
goto update_stat;
- damon_split_region_at(c, t, r, sz);
+ damon_split_region_at(t, r, sz);
}
ktime_get_coarse_ts64(&begin);
sz_applied = c->ops.apply_scheme(c, t, r, s);
@@ -928,9 +937,8 @@ static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
* r the region to be split
* sz_r size of the first sub-region that will be made
*/
-static void damon_split_region_at(struct damon_ctx *ctx,
- struct damon_target *t, struct damon_region *r,
- unsigned long sz_r)
+static void damon_split_region_at(struct damon_target *t,
+ struct damon_region *r, unsigned long sz_r)
{
struct damon_region *new;
@@ -947,8 +955,7 @@ static void damon_split_region_at(struct damon_ctx *ctx,
}
/* Split every region in the given target into 'nr_subs' regions */
-static void damon_split_regions_of(struct damon_ctx *ctx,
- struct damon_target *t, int nr_subs)
+static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
struct damon_region *r, *next;
unsigned long sz_region, sz_sub = 0;
@@ -969,7 +976,7 @@ static void damon_split_regions_of(struct damon_ctx *ctx,
if (sz_sub == 0 || sz_sub >= sz_region)
continue;
- damon_split_region_at(ctx, t, r, sz_sub);
+ damon_split_region_at(t, r, sz_sub);
sz_region = sz_sub;
}
}
@@ -995,16 +1002,16 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
damon_for_each_target(t, ctx)
nr_regions += damon_nr_regions(t);
- if (nr_regions > ctx->max_nr_regions / 2)
+ if (nr_regions > ctx->attrs.max_nr_regions / 2)
return;
/* Maybe the middle of the region has different access frequency */
if (last_nr_regions == nr_regions &&
- nr_regions < ctx->max_nr_regions / 3)
+ nr_regions < ctx->attrs.max_nr_regions / 3)
nr_subregions = 3;
damon_for_each_target(t, ctx)
- damon_split_regions_of(ctx, t, nr_subregions);
+ damon_split_regions_of(t, nr_subregions);
last_nr_regions = nr_regions;
}
@@ -1018,7 +1025,7 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
static bool kdamond_need_update_operations(struct damon_ctx *ctx)
{
return damon_check_reset_time_interval(&ctx->last_ops_update,
- ctx->ops_update_interval);
+ ctx->attrs.ops_update_interval);
}
/*
@@ -1142,32 +1149,27 @@ static int kdamond_fn(void *data)
struct damon_region *r, *next;
unsigned int max_nr_accesses = 0;
unsigned long sz_limit = 0;
- bool done = false;
pr_debug("kdamond (%d) starts\n", current->pid);
if (ctx->ops.init)
ctx->ops.init(ctx);
if (ctx->callback.before_start && ctx->callback.before_start(ctx))
- done = true;
+ goto done;
sz_limit = damon_region_sz_limit(ctx);
- while (!kdamond_need_stop(ctx) && !done) {
- if (kdamond_wait_activation(ctx)) {
- done = true;
- continue;
- }
+ while (!kdamond_need_stop(ctx)) {
+ if (kdamond_wait_activation(ctx))
+ break;
if (ctx->ops.prepare_access_checks)
ctx->ops.prepare_access_checks(ctx);
if (ctx->callback.after_sampling &&
- ctx->callback.after_sampling(ctx)) {
- done = true;
- continue;
- }
+ ctx->callback.after_sampling(ctx))
+ break;
- kdamond_usleep(ctx->sample_interval);
+ kdamond_usleep(ctx->attrs.sample_interval);
if (ctx->ops.check_accesses)
max_nr_accesses = ctx->ops.check_accesses(ctx);
@@ -1177,10 +1179,8 @@ static int kdamond_fn(void *data)
max_nr_accesses / 10,
sz_limit);
if (ctx->callback.after_aggregation &&
- ctx->callback.after_aggregation(ctx)) {
- done = true;
- continue;
- }
+ ctx->callback.after_aggregation(ctx))
+ break;
kdamond_apply_schemes(ctx);
kdamond_reset_aggregated(ctx);
kdamond_split_regions(ctx);
@@ -1194,6 +1194,7 @@ static int kdamond_fn(void *data)
sz_limit = damon_region_sz_limit(ctx);
}
}
+done:
damon_for_each_target(t, ctx) {
damon_for_each_region_safe(r, next, t)
damon_destroy_region(r, t);
@@ -1218,4 +1219,90 @@ static int kdamond_fn(void *data)
return 0;
}
+/*
+ * struct damon_system_ram_region - System RAM resource address region of
+ * [@start, @end).
+ * @start: Start address of the region (inclusive).
+ * @end: End address of the region (exclusive).
+ */
+struct damon_system_ram_region {
+ unsigned long start;
+ unsigned long end;
+};
+
+static int walk_system_ram(struct resource *res, void *arg)
+{
+ struct damon_system_ram_region *a = arg;
+
+ if (a->end - a->start < resource_size(res)) {
+ a->start = res->start;
+ a->end = res->end;
+ }
+ return 0;
+}
+
+/*
+ * Find biggest 'System RAM' resource and store its start and end address in
+ * @start and @end, respectively. If no System RAM is found, returns false.
+ */
+static bool damon_find_biggest_system_ram(unsigned long *start,
+ unsigned long *end)
+
+{
+ struct damon_system_ram_region arg = {};
+
+ walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
+ if (arg.end <= arg.start)
+ return false;
+
+ *start = arg.start;
+ *end = arg.end;
+ return true;
+}
+
+/**
+ * damon_set_region_biggest_system_ram_default() - Set the region of the given
+ * monitoring target as requested, or biggest 'System RAM'.
+ * @t: The monitoring target to set the region.
+ * @start: The pointer to the start address of the region.
+ * @end: The pointer to the end address of the region.
+ *
+ * This function sets the region of @t as requested by @start and @end. If the
+ * values of @start and @end are zero, however, this function finds the biggest
+ * 'System RAM' resource and sets the region to cover the resource. In the
+ * latter case, this function saves the start and end addresses of the resource
+ * in @start and @end, respectively.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+ unsigned long *start, unsigned long *end)
+{
+ struct damon_addr_range addr_range;
+
+ if (*start > *end)
+ return -EINVAL;
+
+ if (!*start && !*end &&
+ !damon_find_biggest_system_ram(start, end))
+ return -EINVAL;
+
+ addr_range.start = *start;
+ addr_range.end = *end;
+ return damon_set_regions(t, &addr_range, 1);
+}
+
+static int __init damon_init(void)
+{
+ damon_region_cache = KMEM_CACHE(damon_region, 0);
+ if (unlikely(!damon_region_cache)) {
+ pr_err("creating damon_region_cache fails\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+subsys_initcall(damon_init);
+
#include "core-test.h"
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 4e51466c4e74..6f0ae7d3ae39 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -55,9 +55,9 @@ static ssize_t dbgfs_attrs_read(struct file *file,
mutex_lock(&ctx->kdamond_lock);
ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
- ctx->sample_interval, ctx->aggr_interval,
- ctx->ops_update_interval, ctx->min_nr_regions,
- ctx->max_nr_regions);
+ ctx->attrs.sample_interval, ctx->attrs.aggr_interval,
+ ctx->attrs.ops_update_interval,
+ ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions);
mutex_unlock(&ctx->kdamond_lock);
return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
@@ -67,7 +67,7 @@ static ssize_t dbgfs_attrs_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
struct damon_ctx *ctx = file->private_data;
- unsigned long s, a, r, minr, maxr;
+ struct damon_attrs attrs;
char *kbuf;
ssize_t ret;
@@ -76,7 +76,10 @@ static ssize_t dbgfs_attrs_write(struct file *file,
return PTR_ERR(kbuf);
if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
- &s, &a, &r, &minr, &maxr) != 5) {
+ &attrs.sample_interval, &attrs.aggr_interval,
+ &attrs.ops_update_interval,
+ &attrs.min_nr_regions,
+ &attrs.max_nr_regions) != 5) {
ret = -EINVAL;
goto out;
}
@@ -87,7 +90,7 @@ static ssize_t dbgfs_attrs_write(struct file *file,
goto unlock_out;
}
- ret = damon_set_attrs(ctx, s, a, r, minr, maxr);
+ ret = damon_set_attrs(ctx, &attrs);
if (!ret)
ret = count;
unlock_out:
@@ -131,9 +134,12 @@ static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
damon_for_each_scheme(s, c) {
rc = scnprintf(&buf[written], len - written,
"%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
- s->min_sz_region, s->max_sz_region,
- s->min_nr_accesses, s->max_nr_accesses,
- s->min_age_region, s->max_age_region,
+ s->pattern.min_sz_region,
+ s->pattern.max_sz_region,
+ s->pattern.min_nr_accesses,
+ s->pattern.max_nr_accesses,
+ s->pattern.min_age_region,
+ s->pattern.max_age_region,
damos_action_to_dbgfs_scheme_action(s->action),
s->quota.ms, s->quota.sz,
s->quota.reset_interval,
@@ -221,8 +227,6 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
struct damos *scheme, **schemes;
const int max_nr_schemes = 256;
int pos = 0, parsed, ret;
- unsigned long min_sz, max_sz;
- unsigned int min_nr_a, max_nr_a, min_age, max_age;
unsigned int action_input;
enum damos_action action;
@@ -233,13 +237,18 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
*nr_schemes = 0;
while (pos < len && *nr_schemes < max_nr_schemes) {
+ struct damos_access_pattern pattern = {};
struct damos_quota quota = {};
struct damos_watermarks wmarks;
ret = sscanf(&str[pos],
"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
- &min_sz, &max_sz, &min_nr_a, &max_nr_a,
- &min_age, &max_age, &action_input, &quota.ms,
+ &pattern.min_sz_region, &pattern.max_sz_region,
+ &pattern.min_nr_accesses,
+ &pattern.max_nr_accesses,
+ &pattern.min_age_region,
+ &pattern.max_age_region,
+ &action_input, &quota.ms,
&quota.sz, &quota.reset_interval,
&quota.weight_sz, &quota.weight_nr_accesses,
&quota.weight_age, &wmarks.metric,
@@ -251,7 +260,9 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
if ((int)action < 0)
goto fail;
- if (min_sz > max_sz || min_nr_a > max_nr_a || min_age > max_age)
+ if (pattern.min_sz_region > pattern.max_sz_region ||
+ pattern.min_nr_accesses > pattern.max_nr_accesses ||
+ pattern.min_age_region > pattern.max_age_region)
goto fail;
if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
@@ -259,8 +270,7 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
goto fail;
pos += parsed;
- scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
- min_age, max_age, action, &quota, &wmarks);
+ scheme = damon_new_scheme(&pattern, action, &quota, &wmarks);
if (!scheme)
goto fail;
@@ -297,11 +307,9 @@ static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
goto unlock_out;
}
- ret = damon_set_schemes(ctx, schemes, nr_schemes);
- if (!ret) {
- ret = count;
- nr_schemes = 0;
- }
+ damon_set_schemes(ctx, schemes, nr_schemes);
+ ret = count;
+ nr_schemes = 0;
unlock_out:
mutex_unlock(&ctx->kdamond_lock);
@@ -1053,7 +1061,7 @@ static int __init __damon_dbgfs_init(void)
fops[i]);
dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);
- dbgfs_dirs = kmalloc_array(1, sizeof(dbgfs_root), GFP_KERNEL);
+ dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL);
if (!dbgfs_dirs) {
debugfs_remove(dbgfs_root);
return -ENOMEM;
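The dbgfs 'attrs' file format is unchanged by this refactor: five unsigned
longs, now parsed straight into a struct damon_attrs. An illustrative
userspace writer (the path assumes debugfs is mounted at /sys/kernel/debug):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/damon/attrs", O_WRONLY);

		if (fd < 0)
			return 1;
		/* sample, aggr, ops_update intervals (us); min/max nr_regions */
		dprintf(fd, "5000 100000 1000000 10 1000\n");
		close(fd);
		return 0;
	}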
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 9de6f00a71c5..efbc2bda8b9c 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -13,6 +13,8 @@
#include <linux/sched.h>
#include <linux/workqueue.h>
+#include "modules-common.h"
+
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
@@ -63,109 +65,35 @@ module_param(hot_thres_access_freq, ulong, 0600);
static unsigned long cold_min_age __read_mostly = 120000000;
module_param(cold_min_age, ulong, 0600);
-/*
- * Limit of time for trying the LRU lists sorting in milliseconds.
- *
- * DAMON_LRU_SORT tries to use only up to this time within a time window
- * (quota_reset_interval_ms) for trying LRU lists sorting. This can be used
- * for limiting CPU consumption of DAMON_LRU_SORT. If the value is zero, the
- * limit is disabled.
- *
- * 10 ms by default.
- */
-static unsigned long quota_ms __read_mostly = 10;
-module_param(quota_ms, ulong, 0600);
-
-/*
- * The time quota charge reset interval in milliseconds.
- *
- * The charge reset interval for the quota of time (quota_ms). That is,
- * DAMON_LRU_SORT does not try LRU-lists sorting for more than quota_ms
- * milliseconds or quota_sz bytes within quota_reset_interval_ms milliseconds.
- *
- * 1 second by default.
- */
-static unsigned long quota_reset_interval_ms __read_mostly = 1000;
-module_param(quota_reset_interval_ms, ulong, 0600);
-
-/*
- * The watermarks check time interval in microseconds.
- *
- * Minimal time to wait before checking the watermarks, when DAMON_LRU_SORT is
- * enabled but inactive due to its watermarks rule. 5 seconds by default.
- */
-static unsigned long wmarks_interval __read_mostly = 5000000;
-module_param(wmarks_interval, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the high watermark.
- *
- * If free memory of the system in bytes per thousand bytes is higher than
- * this, DAMON_LRU_SORT becomes inactive, so it does nothing but periodically
- * checks the watermarks. 200 (20%) by default.
- */
-static unsigned long wmarks_high __read_mostly = 200;
-module_param(wmarks_high, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the middle watermark.
- *
- * If free memory of the system in bytes per thousand bytes is between this and
- * the low watermark, DAMON_LRU_SORT becomes active, so starts the monitoring
- * and the LRU-lists sorting. 150 (15%) by default.
- */
-static unsigned long wmarks_mid __read_mostly = 150;
-module_param(wmarks_mid, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the low watermark.
- *
- * If free memory of the system in bytes per thousand bytes is lower than this,
- * DAMON_LRU_SORT becomes inactive, so it does nothing but periodically checks
- * the watermarks. 50 (5%) by default.
- */
-static unsigned long wmarks_low __read_mostly = 50;
-module_param(wmarks_low, ulong, 0600);
-
-/*
- * Sampling interval for the monitoring in microseconds.
- *
- * The sampling interval of DAMON for the hot/cold memory monitoring. Please
- * refer to the DAMON documentation for more detail. 5 ms by default.
- */
-static unsigned long sample_interval __read_mostly = 5000;
-module_param(sample_interval, ulong, 0600);
-
-/*
- * Aggregation interval for the monitoring in microseconds.
- *
- * The aggregation interval of DAMON for the hot/cold memory monitoring.
- * Please refer to the DAMON documentation for more detail. 100 ms by default.
- */
-static unsigned long aggr_interval __read_mostly = 100000;
-module_param(aggr_interval, ulong, 0600);
-
-/*
- * Minimum number of monitoring regions.
- *
- * The minimal number of monitoring regions of DAMON for the hot/cold memory
- * monitoring. This can be used to set lower-bound of the monitoring quality.
- * But, setting this too high could result in increased monitoring overhead.
- * Please refer to the DAMON documentation for more detail. 10 by default.
- */
-static unsigned long min_nr_regions __read_mostly = 10;
-module_param(min_nr_regions, ulong, 0600);
-
-/*
- * Maximum number of monitoring regions.
- *
- * The maximum number of monitoring regions of DAMON for the hot/cold memory
- * monitoring. This can be used to set upper-bound of the monitoring overhead.
- * However, setting this too low could result in bad monitoring quality.
- * Please refer to the DAMON documentation for more detail. 1000 by default.
- */
-static unsigned long max_nr_regions __read_mostly = 1000;
-module_param(max_nr_regions, ulong, 0600);
+static struct damos_quota damon_lru_sort_quota = {
+ /* Use up to 10 ms per 1 sec, by default */
+ .ms = 10,
+ .sz = 0,
+ .reset_interval = 1000,
+ /* Within the quota, mark hotter regions accessed first. */
+ .weight_sz = 0,
+ .weight_nr_accesses = 1,
+ .weight_age = 0,
+};
+DEFINE_DAMON_MODULES_DAMOS_TIME_QUOTA(damon_lru_sort_quota);
+
+static struct damos_watermarks damon_lru_sort_wmarks = {
+ .metric = DAMOS_WMARK_FREE_MEM_RATE,
+ .interval = 5000000, /* 5 seconds */
+ .high = 200, /* 20 percent */
+ .mid = 150, /* 15 percent */
+ .low = 50, /* 5 percent */
+};
+DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_lru_sort_wmarks);
+
+static struct damon_attrs damon_lru_sort_mon_attrs = {
+ .sample_interval = 5000, /* 5 ms */
+ .aggr_interval = 100000, /* 100 ms */
+ .ops_update_interval = 0,
+ .min_nr_regions = 10,
+ .max_nr_regions = 1000,
+};
+DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_lru_sort_mon_attrs);
/*
* Start of the target memory region in physical address.
@@ -194,222 +122,97 @@ module_param(monitor_region_end, ulong, 0600);
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);
-/*
- * Number of hot memory regions that tried to be LRU-sorted.
- */
-static unsigned long nr_lru_sort_tried_hot_regions __read_mostly;
-module_param(nr_lru_sort_tried_hot_regions, ulong, 0400);
-
-/*
- * Total bytes of hot memory regions that tried to be LRU-sorted.
- */
-static unsigned long bytes_lru_sort_tried_hot_regions __read_mostly;
-module_param(bytes_lru_sort_tried_hot_regions, ulong, 0400);
-
-/*
- * Number of hot memory regions that successfully be LRU-sorted.
- */
-static unsigned long nr_lru_sorted_hot_regions __read_mostly;
-module_param(nr_lru_sorted_hot_regions, ulong, 0400);
-
-/*
- * Total bytes of hot memory regions that successfully be LRU-sorted.
- */
-static unsigned long bytes_lru_sorted_hot_regions __read_mostly;
-module_param(bytes_lru_sorted_hot_regions, ulong, 0400);
-
-/*
- * Number of times that the time quota limit for hot regions have exceeded
- */
-static unsigned long nr_hot_quota_exceeds __read_mostly;
-module_param(nr_hot_quota_exceeds, ulong, 0400);
-
-/*
- * Number of cold memory regions that tried to be LRU-sorted.
- */
-static unsigned long nr_lru_sort_tried_cold_regions __read_mostly;
-module_param(nr_lru_sort_tried_cold_regions, ulong, 0400);
-
-/*
- * Total bytes of cold memory regions that tried to be LRU-sorted.
- */
-static unsigned long bytes_lru_sort_tried_cold_regions __read_mostly;
-module_param(bytes_lru_sort_tried_cold_regions, ulong, 0400);
-
-/*
- * Number of cold memory regions that successfully be LRU-sorted.
- */
-static unsigned long nr_lru_sorted_cold_regions __read_mostly;
-module_param(nr_lru_sorted_cold_regions, ulong, 0400);
-
-/*
- * Total bytes of cold memory regions that successfully be LRU-sorted.
- */
-static unsigned long bytes_lru_sorted_cold_regions __read_mostly;
-module_param(bytes_lru_sorted_cold_regions, ulong, 0400);
-
-/*
- * Number of times that the time quota limit for cold regions have exceeded
- */
-static unsigned long nr_cold_quota_exceeds __read_mostly;
-module_param(nr_cold_quota_exceeds, ulong, 0400);
+static struct damos_stat damon_lru_sort_hot_stat;
+DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_hot_stat,
+ lru_sort_tried_hot_regions, lru_sorted_hot_regions,
+ hot_quota_exceeds);
+
+static struct damos_stat damon_lru_sort_cold_stat;
+DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_cold_stat,
+ lru_sort_tried_cold_regions, lru_sorted_cold_regions,
+ cold_quota_exceeds);
+
+static struct damos_access_pattern damon_lru_sort_stub_pattern = {
+ /* Find regions having PAGE_SIZE or larger size */
+ .min_sz_region = PAGE_SIZE,
+ .max_sz_region = ULONG_MAX,
+ /* no matter its access frequency */
+ .min_nr_accesses = 0,
+ .max_nr_accesses = UINT_MAX,
+ /* no matter its age */
+ .min_age_region = 0,
+ .max_age_region = UINT_MAX,
+};
static struct damon_ctx *ctx;
static struct damon_target *target;
-struct damon_lru_sort_ram_walk_arg {
- unsigned long start;
- unsigned long end;
-};
-
-static int walk_system_ram(struct resource *res, void *arg)
-{
- struct damon_lru_sort_ram_walk_arg *a = arg;
-
- if (a->end - a->start < resource_size(res)) {
- a->start = res->start;
- a->end = res->end;
- }
- return 0;
-}
-
-/*
- * Find biggest 'System RAM' resource and store its start and end address in
- * @start and @end, respectively. If no System RAM is found, returns false.
- */
-static bool get_monitoring_region(unsigned long *start, unsigned long *end)
+static struct damos *damon_lru_sort_new_scheme(
+ struct damos_access_pattern *pattern, enum damos_action action)
{
- struct damon_lru_sort_ram_walk_arg arg = {};
+ struct damos_quota quota = damon_lru_sort_quota;
- walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
- if (arg.end <= arg.start)
- return false;
+ /* Use half of total quota for hot/cold pages sorting */
+ quota.ms = quota.ms / 2;
- *start = arg.start;
- *end = arg.end;
- return true;
+ return damon_new_scheme(
+ /* find the pattern, and */
+ pattern,
+ /* (de)prioritize on LRU-lists */
+ action,
+ /* under the quota. */
+ &quota,
+ /* (De)activate this according to the watermarks. */
+ &damon_lru_sort_wmarks);
}
/* Create a DAMON-based operation scheme for hot memory regions */
static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
{
- struct damos_watermarks wmarks = {
- .metric = DAMOS_WMARK_FREE_MEM_RATE,
- .interval = wmarks_interval,
- .high = wmarks_high,
- .mid = wmarks_mid,
- .low = wmarks_low,
- };
- struct damos_quota quota = {
- /*
- * Do not try LRU-lists sorting of hot pages for more than half
- * of quota_ms milliseconds within quota_reset_interval_ms.
- */
- .ms = quota_ms / 2,
- .sz = 0,
- .reset_interval = quota_reset_interval_ms,
- /* Within the quota, mark hotter regions accessed first. */
- .weight_sz = 0,
- .weight_nr_accesses = 1,
- .weight_age = 0,
- };
- struct damos *scheme = damon_new_scheme(
- /* Find regions having PAGE_SIZE or larger size */
- PAGE_SIZE, ULONG_MAX,
- /* and accessed for more than the threshold */
- hot_thres, UINT_MAX,
- /* no matter its age */
- 0, UINT_MAX,
- /* prioritize those on LRU lists, as soon as found */
- DAMOS_LRU_PRIO,
- /* under the quota. */
- &quota,
- /* (De)activate this according to the watermarks. */
- &wmarks);
+ struct damos_access_pattern pattern = damon_lru_sort_stub_pattern;
- return scheme;
+ pattern.min_nr_accesses = hot_thres;
+ return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_PRIO);
}
/* Create a DAMON-based operation scheme for cold memory regions */
static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
{
- struct damos_watermarks wmarks = {
- .metric = DAMOS_WMARK_FREE_MEM_RATE,
- .interval = wmarks_interval,
- .high = wmarks_high,
- .mid = wmarks_mid,
- .low = wmarks_low,
- };
- struct damos_quota quota = {
- /*
- * Do not try LRU-lists sorting of cold pages for more than
- * half of quota_ms milliseconds within
- * quota_reset_interval_ms.
- */
- .ms = quota_ms / 2,
- .sz = 0,
- .reset_interval = quota_reset_interval_ms,
- /* Within the quota, mark colder regions not accessed first. */
- .weight_sz = 0,
- .weight_nr_accesses = 0,
- .weight_age = 1,
- };
- struct damos *scheme = damon_new_scheme(
- /* Find regions having PAGE_SIZE or larger size */
- PAGE_SIZE, ULONG_MAX,
- /* and not accessed at all */
- 0, 0,
- /* for cold_thres or more micro-seconds, and */
- cold_thres, UINT_MAX,
- /* mark those as not accessed, as soon as found */
- DAMOS_LRU_DEPRIO,
- /* under the quota. */
- &quota,
- /* (De)activate this according to the watermarks. */
- &wmarks);
+ struct damos_access_pattern pattern = damon_lru_sort_stub_pattern;
- return scheme;
+ pattern.max_nr_accesses = 0;
+ pattern.min_age_region = cold_thres;
+ return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
}
static int damon_lru_sort_apply_parameters(void)
{
- struct damos *scheme, *next_scheme;
- struct damon_addr_range addr_range;
+ struct damos *scheme;
unsigned int hot_thres, cold_thres;
int err = 0;
- err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
- min_nr_regions, max_nr_regions);
+ err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
if (err)
return err;
- /* free previously set schemes */
- damon_for_each_scheme_safe(scheme, next_scheme, ctx)
- damon_destroy_scheme(scheme);
-
/* aggr_interval / sample_interval is the maximum nr_accesses */
- hot_thres = aggr_interval / sample_interval * hot_thres_access_freq /
- 1000;
+ hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
+ damon_lru_sort_mon_attrs.sample_interval *
+ hot_thres_access_freq / 1000;
scheme = damon_lru_sort_new_hot_scheme(hot_thres);
if (!scheme)
return -ENOMEM;
- damon_add_scheme(ctx, scheme);
+ damon_set_schemes(ctx, &scheme, 1);
- cold_thres = cold_min_age / aggr_interval;
+ cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
scheme = damon_lru_sort_new_cold_scheme(cold_thres);
if (!scheme)
return -ENOMEM;
damon_add_scheme(ctx, scheme);
- if (monitor_region_start > monitor_region_end)
- return -EINVAL;
- if (!monitor_region_start && !monitor_region_end &&
- !get_monitoring_region(&monitor_region_start,
- &monitor_region_end))
- return -EINVAL;
- addr_range.start = monitor_region_start;
- addr_range.end = monitor_region_end;
- return damon_set_regions(target, &addr_range, 1);
+ return damon_set_region_biggest_system_ram_default(target,
+ &monitor_region_start,
+ &monitor_region_end);
}
static int damon_lru_sort_turn(bool on)
@@ -495,19 +298,10 @@ static int damon_lru_sort_after_aggregation(struct damon_ctx *c)
/* update the stats parameter */
damon_for_each_scheme(s, c) {
- if (s->action == DAMOS_LRU_PRIO) {
- nr_lru_sort_tried_hot_regions = s->stat.nr_tried;
- bytes_lru_sort_tried_hot_regions = s->stat.sz_tried;
- nr_lru_sorted_hot_regions = s->stat.nr_applied;
- bytes_lru_sorted_hot_regions = s->stat.sz_applied;
- nr_hot_quota_exceeds = s->stat.qt_exceeds;
- } else if (s->action == DAMOS_LRU_DEPRIO) {
- nr_lru_sort_tried_cold_regions = s->stat.nr_tried;
- bytes_lru_sort_tried_cold_regions = s->stat.sz_tried;
- nr_lru_sorted_cold_regions = s->stat.nr_applied;
- bytes_lru_sorted_cold_regions = s->stat.sz_applied;
- nr_cold_quota_exceeds = s->stat.qt_exceeds;
- }
+ if (s->action == DAMOS_LRU_PRIO)
+ damon_lru_sort_hot_stat = s->stat;
+ else if (s->action == DAMOS_LRU_DEPRIO)
+ damon_lru_sort_cold_stat = s->stat;
}
return damon_lru_sort_handle_commit_inputs();
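Working the threshold arithmetic above through the module defaults shown here
(and assuming hot_thres_access_freq keeps its default of 500 per thousand,
which sits outside this hunk):

	max nr_accesses = aggr_interval / sample_interval = 100000 / 5000 = 20
	hot_thres       = 20 * 500 / 1000 = 10 accesses
	cold_thres      = cold_min_age / aggr_interval = 120000000 / 100000
	                = 1200 aggregation intervals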
diff --git a/mm/damon/modules-common.h b/mm/damon/modules-common.h
new file mode 100644
index 000000000000..5a4921851d32
--- /dev/null
+++ b/mm/damon/modules-common.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common Primitives for DAMON Modules
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#include <linux/moduleparam.h>
+
+#define DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(attrs) \
+ module_param_named(sample_interval, attrs.sample_interval, \
+ ulong, 0600); \
+ module_param_named(aggr_interval, attrs.aggr_interval, ulong, \
+ 0600); \
+ module_param_named(min_nr_regions, attrs.min_nr_regions, ulong, \
+ 0600); \
+ module_param_named(max_nr_regions, attrs.max_nr_regions, ulong, \
+ 0600);
+
+#define DEFINE_DAMON_MODULES_DAMOS_TIME_QUOTA(quota) \
+ module_param_named(quota_ms, quota.ms, ulong, 0600); \
+ module_param_named(quota_reset_interval_ms, \
+ quota.reset_interval, ulong, 0600);
+
+#define DEFINE_DAMON_MODULES_DAMOS_QUOTAS(quota) \
+ DEFINE_DAMON_MODULES_DAMOS_TIME_QUOTA(quota) \
+ module_param_named(quota_sz, quota.sz, ulong, 0600);
+
+#define DEFINE_DAMON_MODULES_WMARKS_PARAMS(wmarks) \
+ module_param_named(wmarks_interval, wmarks.interval, ulong, \
+ 0600); \
+ module_param_named(wmarks_high, wmarks.high, ulong, 0600); \
+ module_param_named(wmarks_mid, wmarks.mid, ulong, 0600); \
+ module_param_named(wmarks_low, wmarks.low, ulong, 0600);
+
+#define DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(stat, try_name, \
+ succ_name, qt_exceed_name) \
+ module_param_named(nr_##try_name, stat.nr_tried, ulong, 0400); \
+ module_param_named(bytes_##try_name, stat.sz_tried, ulong, \
+ 0400); \
+ module_param_named(nr_##succ_name, stat.nr_applied, ulong, \
+ 0400); \
+ module_param_named(bytes_##succ_name, stat.sz_applied, ulong, \
+ 0400); \
+ module_param_named(nr_##qt_exceed_name, stat.qt_exceeds, ulong, \
+ 0400);
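As a concrete expansion, derived mechanically from the macro definition
above, DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_reclaim_stat,
reclaim_tried_regions, reclaimed_regions, quota_exceeds) produces:

	module_param_named(nr_reclaim_tried_regions, damon_reclaim_stat.nr_tried, ulong, 0400);
	module_param_named(bytes_reclaim_tried_regions, damon_reclaim_stat.sz_tried, ulong, 0400);
	module_param_named(nr_reclaimed_regions, damon_reclaim_stat.nr_applied, ulong, 0400);
	module_param_named(bytes_reclaimed_regions, damon_reclaim_stat.sz_applied, ulong, 0400);
	module_param_named(nr_quota_exceeds, damon_reclaim_stat.qt_exceeds, ulong, 0400);

so the long-standing read-only parameter names are preserved while their
backing storage moves into struct damos_stat.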
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index b1335de200e7..75409601f934 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -88,7 +88,7 @@ void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
#define DAMON_MAX_SUBSCORE (100)
#define DAMON_MAX_AGE_IN_LOG (32)
-int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
+int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
struct damos *s)
{
unsigned int max_nr_accesses;
@@ -99,10 +99,10 @@ int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
unsigned int age_weight = s->quota.weight_age;
int hotness;
- max_nr_accesses = c->aggr_interval / c->sample_interval;
+ max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
- age_in_sec = (unsigned long)r->age * c->aggr_interval / 1000000;
+ age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
age_in_log++, age_in_sec >>= 1)
;
@@ -127,48 +127,14 @@ int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
*/
hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE;
- /* Return coldness of the region */
- return DAMOS_MAX_SCORE - hotness;
+ return hotness;
}
-int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
struct damos *s)
{
- unsigned int max_nr_accesses;
- int freq_subscore;
- unsigned int age_in_sec;
- int age_in_log, age_subscore;
- unsigned int freq_weight = s->quota.weight_nr_accesses;
- unsigned int age_weight = s->quota.weight_age;
- int hotness;
-
- max_nr_accesses = c->aggr_interval / c->sample_interval;
- freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
-
- age_in_sec = (unsigned long)r->age * c->aggr_interval / 1000000;
- for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
- age_in_log++, age_in_sec >>= 1)
- ;
+ int hotness = damon_hot_score(c, r, s);
- /* If frequency is 0, higher age means it's colder */
- if (freq_subscore == 0)
- age_in_log *= -1;
-
- /*
- * Now age_in_log is in [-DAMON_MAX_AGE_IN_LOG, DAMON_MAX_AGE_IN_LOG].
- * Scale it to be in [0, 100] and set it as age subscore.
- */
- age_in_log += DAMON_MAX_AGE_IN_LOG;
- age_subscore = age_in_log * DAMON_MAX_SUBSCORE /
- DAMON_MAX_AGE_IN_LOG / 2;
-
- hotness = (freq_weight * freq_subscore + age_weight * age_subscore);
- if (freq_weight + age_weight)
- hotness /= freq_weight + age_weight;
- /*
- * Transform it to fit in [0, DAMOS_MAX_SCORE]
- */
- hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE;
-
- return hotness;
+ /* Return coldness of the region */
+ return DAMOS_MAX_SCORE - hotness;
}
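Net effect of this refactor: damon_cold_score() is now just the complement of
damon_hot_score() (DAMOS_MAX_SCORE minus the hot score), so schemes that
prefer colder regions, such as DAMOS_PAGEOUT and DAMOS_LRU_DEPRIO, keep their
previous prioritization without duplicating the scoring body.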
diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
index 52329ff361cd..8d82d3722204 100644
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -12,7 +12,7 @@ struct page *damon_get_page(unsigned long pfn);
void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr);
void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr);
-int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
+int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
struct damos *s);
int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
struct damos *s);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index dc131c6a5403..e1a4315c4be6 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -63,8 +63,7 @@ out:
folio_put(folio);
}
-static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
- struct damon_region *r)
+static void __damon_pa_prepare_access_check(struct damon_region *r)
{
r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
@@ -78,7 +77,7 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t)
- __damon_pa_prepare_access_check(ctx, r);
+ __damon_pa_prepare_access_check(r);
}
}
@@ -166,8 +165,7 @@ out:
return result.accessed;
}
-static void __damon_pa_check_access(struct damon_ctx *ctx,
- struct damon_region *r)
+static void __damon_pa_check_access(struct damon_region *r)
{
static unsigned long last_addr;
static unsigned long last_page_sz = PAGE_SIZE;
@@ -196,7 +194,7 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t) {
- __damon_pa_check_access(ctx, r);
+ __damon_pa_check_access(r);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
}
}
@@ -233,7 +231,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r)
return applied * PAGE_SIZE;
}
-static unsigned long damon_pa_mark_accessed(struct damon_region *r)
+static inline unsigned long damon_pa_mark_accessed_or_deactivate(
+ struct damon_region *r, bool mark_accessed)
{
unsigned long addr, applied = 0;
@@ -242,27 +241,24 @@ static unsigned long damon_pa_mark_accessed(struct damon_region *r)
if (!page)
continue;
- mark_page_accessed(page);
+ if (mark_accessed)
+ mark_page_accessed(page);
+ else
+ deactivate_page(page);
put_page(page);
applied++;
}
return applied * PAGE_SIZE;
}
-static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
+static unsigned long damon_pa_mark_accessed(struct damon_region *r)
{
- unsigned long addr, applied = 0;
-
- for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
- struct page *page = damon_get_page(PHYS_PFN(addr));
+ return damon_pa_mark_accessed_or_deactivate(r, true);
+}
- if (!page)
- continue;
- deactivate_page(page);
- put_page(page);
- applied++;
- }
- return applied * PAGE_SIZE;
+static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
+{
+ return damon_pa_mark_accessed_or_deactivate(r, false);
}
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
@@ -276,7 +272,10 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
return damon_pa_mark_accessed(r);
case DAMOS_LRU_DEPRIO:
return damon_pa_deactivate_pages(r);
+ case DAMOS_STAT:
+ break;
default:
+ /* DAMOS actions that not yet supported by 'paddr'. */
break;
}
return 0;
@@ -288,11 +287,11 @@ static int damon_pa_scheme_score(struct damon_ctx *context,
{
switch (scheme->action) {
case DAMOS_PAGEOUT:
- return damon_pageout_score(context, r, scheme);
+ return damon_cold_score(context, r, scheme);
case DAMOS_LRU_PRIO:
return damon_hot_score(context, r, scheme);
case DAMOS_LRU_DEPRIO:
- return damon_pageout_score(context, r, scheme);
+ return damon_cold_score(context, r, scheme);
default:
break;
}
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index a7faf51b4bd4..162c9b1ca00f 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -13,6 +13,8 @@
#include <linux/sched.h>
#include <linux/workqueue.h>
+#include "modules-common.h"
+
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
@@ -50,124 +52,35 @@ module_param(commit_inputs, bool, 0600);
static unsigned long min_age __read_mostly = 120000000;
module_param(min_age, ulong, 0600);
-/*
- * Limit of time for trying the reclamation in milliseconds.
- *
- * DAMON_RECLAIM tries to use only up to this time within a time window
- * (quota_reset_interval_ms) for trying reclamation of cold pages. This can be
- * used for limiting CPU consumption of DAMON_RECLAIM. If the value is zero,
- * the limit is disabled.
- *
- * 10 ms by default.
- */
-static unsigned long quota_ms __read_mostly = 10;
-module_param(quota_ms, ulong, 0600);
-
-/*
- * Limit of size of memory for the reclamation in bytes.
- *
- * DAMON_RECLAIM charges amount of memory which it tried to reclaim within a
- * time window (quota_reset_interval_ms) and makes no more than this limit is
- * tried. This can be used for limiting consumption of CPU and IO. If this
- * value is zero, the limit is disabled.
- *
- * 128 MiB by default.
- */
-static unsigned long quota_sz __read_mostly = 128 * 1024 * 1024;
-module_param(quota_sz, ulong, 0600);
-
-/*
- * The time/size quota charge reset interval in milliseconds.
- *
- * The charge reset interval for the quota of time (quota_ms) and size
- * (quota_sz). That is, DAMON_RECLAIM does not try reclamation for more than
- * quota_ms milliseconds or quota_sz bytes within quota_reset_interval_ms
- * milliseconds.
- *
- * 1 second by default.
- */
-static unsigned long quota_reset_interval_ms __read_mostly = 1000;
-module_param(quota_reset_interval_ms, ulong, 0600);
-
-/*
- * The watermarks check time interval in microseconds.
- *
- * Minimal time to wait before checking the watermarks, when DAMON_RECLAIM is
- * enabled but inactive due to its watermarks rule. 5 seconds by default.
- */
-static unsigned long wmarks_interval __read_mostly = 5000000;
-module_param(wmarks_interval, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the high watermark.
- *
- * If free memory of the system in bytes per thousand bytes is higher than
- * this, DAMON_RECLAIM becomes inactive, so it does nothing but periodically
- * checks the watermarks. 500 (50%) by default.
- */
-static unsigned long wmarks_high __read_mostly = 500;
-module_param(wmarks_high, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the middle watermark.
- *
- * If free memory of the system in bytes per thousand bytes is between this and
- * the low watermark, DAMON_RECLAIM becomes active, so starts the monitoring
- * and the reclaiming. 400 (40%) by default.
- */
-static unsigned long wmarks_mid __read_mostly = 400;
-module_param(wmarks_mid, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the low watermark.
- *
- * If free memory of the system in bytes per thousand bytes is lower than this,
- * DAMON_RECLAIM becomes inactive, so it does nothing but periodically checks
- * the watermarks. In the case, the system falls back to the LRU-based page
- * granularity reclamation logic. 200 (20%) by default.
- */
-static unsigned long wmarks_low __read_mostly = 200;
-module_param(wmarks_low, ulong, 0600);
-
-/*
- * Sampling interval for the monitoring in microseconds.
- *
- * The sampling interval of DAMON for the cold memory monitoring. Please refer
- * to the DAMON documentation for more detail. 5 ms by default.
- */
-static unsigned long sample_interval __read_mostly = 5000;
-module_param(sample_interval, ulong, 0600);
-
-/*
- * Aggregation interval for the monitoring in microseconds.
- *
- * The aggregation interval of DAMON for the cold memory monitoring. Please
- * refer to the DAMON documentation for more detail. 100 ms by default.
- */
-static unsigned long aggr_interval __read_mostly = 100000;
-module_param(aggr_interval, ulong, 0600);
-
-/*
- * Minimum number of monitoring regions.
- *
- * The minimal number of monitoring regions of DAMON for the cold memory
- * monitoring. This can be used to set lower-bound of the monitoring quality.
- * But, setting this too high could result in increased monitoring overhead.
- * Please refer to the DAMON documentation for more detail. 10 by default.
- */
-static unsigned long min_nr_regions __read_mostly = 10;
-module_param(min_nr_regions, ulong, 0600);
-
-/*
- * Maximum number of monitoring regions.
- *
- * The maximum number of monitoring regions of DAMON for the cold memory
- * monitoring. This can be used to set upper-bound of the monitoring overhead.
- * However, setting this too low could result in bad monitoring quality.
- * Please refer to the DAMON documentation for more detail. 1000 by default.
- */
-static unsigned long max_nr_regions __read_mostly = 1000;
-module_param(max_nr_regions, ulong, 0600);
+static struct damos_quota damon_reclaim_quota = {
+ /* use up to 10 ms time, reclaim up to 128 MiB per 1 sec by default */
+ .ms = 10,
+ .sz = 128 * 1024 * 1024,
+ .reset_interval = 1000,
+ /* Within the quota, page out older regions first. */
+ .weight_sz = 0,
+ .weight_nr_accesses = 0,
+ .weight_age = 1
+};
+DEFINE_DAMON_MODULES_DAMOS_QUOTAS(damon_reclaim_quota);
+
+static struct damos_watermarks damon_reclaim_wmarks = {
+ .metric = DAMOS_WMARK_FREE_MEM_RATE,
+ .interval = 5000000, /* 5 seconds */
+ .high = 500, /* 50 percent */
+ .mid = 400, /* 40 percent */
+ .low = 200, /* 20 percent */
+};
+DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_reclaim_wmarks);
+
+static struct damon_attrs damon_reclaim_mon_attrs = {
+ .sample_interval = 5000, /* 5 ms */
+ .aggr_interval = 100000, /* 100 ms */
+ .ops_update_interval = 0,
+ .min_nr_regions = 10,
+ .max_nr_regions = 1000,
+};
+DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_reclaim_mon_attrs);
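These attributes trade monitoring quality against overhead: each region is
sampled once per sample_interval, per-region access counts are aggregated
(and reset) every aggr_interval, and min/max_nr_regions bound how many
regions exist at all.  A throwaway helper showing the resulting range of a
region's nr_accesses under the defaults above:

/* With 5 ms sampling and 100 ms aggregation, a region's nr_accesses is
 * a value in [0, 20] at the end of each aggregation interval. */
static unsigned int max_nr_accesses(unsigned long sample_us,
		unsigned long aggr_us)
{
	return aggr_us / sample_us;	/* 100000 / 5000 == 20 */
}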
/*
* Start of the target memory region in physical address.
@@ -196,119 +109,44 @@ module_param(monitor_region_end, ulong, 0600);
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);
-/*
- * Number of memory regions that DAMON_RECLAIM tried to reclaim.
- */
-static unsigned long nr_reclaim_tried_regions __read_mostly;
-module_param(nr_reclaim_tried_regions, ulong, 0400);
-
-/*
- * Total bytes of memory regions that DAMON_RECLAIM tried to reclaim.
- */
-static unsigned long bytes_reclaim_tried_regions __read_mostly;
-module_param(bytes_reclaim_tried_regions, ulong, 0400);
-
-/*
- * Number of memory regions that were successfully reclaimed.
- */
-static unsigned long nr_reclaimed_regions __read_mostly;
-module_param(nr_reclaimed_regions, ulong, 0400);
-
-/*
- * Total bytes of memory regions that were successfully reclaimed.
- */
-static unsigned long bytes_reclaimed_regions __read_mostly;
-module_param(bytes_reclaimed_regions, ulong, 0400);
-
-/*
- * Number of times that the time/space quota limits have been exceeded.
- */
-static unsigned long nr_quota_exceeds __read_mostly;
-module_param(nr_quota_exceeds, ulong, 0400);
+static struct damos_stat damon_reclaim_stat;
+DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_reclaim_stat,
+ reclaim_tried_regions, reclaimed_regions, quota_exceeds);
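DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS() replaces the five hand-written,
read-only stat parameters deleted above.  Judging from this call site, it
presumably expands to 0400 module_param_named() definitions binding the
struct fields to the old nr_/bytes_-prefixed names, roughly as sketched here
(an approximation, not a verbatim copy of mm/damon/modules-common.h):

/* Plausible shape of the macro expansion for this call site. */
module_param_named(nr_reclaim_tried_regions, damon_reclaim_stat.nr_tried,
		ulong, 0400);
module_param_named(bytes_reclaim_tried_regions, damon_reclaim_stat.sz_tried,
		ulong, 0400);
module_param_named(nr_reclaimed_regions, damon_reclaim_stat.nr_applied,
		ulong, 0400);
module_param_named(bytes_reclaimed_regions, damon_reclaim_stat.sz_applied,
		ulong, 0400);
module_param_named(nr_quota_exceeds, damon_reclaim_stat.qt_exceeds,
		ulong, 0400);

If so, the user-visible parameter names are preserved across the refactor.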
static struct damon_ctx *ctx;
static struct damon_target *target;
-struct damon_reclaim_ram_walk_arg {
- unsigned long start;
- unsigned long end;
-};
-
-static int walk_system_ram(struct resource *res, void *arg)
-{
- struct damon_reclaim_ram_walk_arg *a = arg;
-
- if (a->end - a->start < resource_size(res)) {
- a->start = res->start;
- a->end = res->end;
- }
- return 0;
-}
-
-/*
- * Find the biggest 'System RAM' resource and store its start and end
- * addresses in @start and @end, respectively.  Returns false if no System
- * RAM is found.
- */
-static bool get_monitoring_region(unsigned long *start, unsigned long *end)
-{
- struct damon_reclaim_ram_walk_arg arg = {};
-
- walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
- if (arg.end <= arg.start)
- return false;
-
- *start = arg.start;
- *end = arg.end;
- return true;
-}
-
static struct damos *damon_reclaim_new_scheme(void)
{
- struct damos_watermarks wmarks = {
- .metric = DAMOS_WMARK_FREE_MEM_RATE,
- .interval = wmarks_interval,
- .high = wmarks_high,
- .mid = wmarks_mid,
- .low = wmarks_low,
- };
- struct damos_quota quota = {
- /*
- * Do not try reclamation for more than quota_ms milliseconds
- * or quota_sz bytes within quota_reset_interval_ms.
- */
- .ms = quota_ms,
- .sz = quota_sz,
- .reset_interval = quota_reset_interval_ms,
- /* Within the quota, page out older regions first. */
- .weight_sz = 0,
- .weight_nr_accesses = 0,
- .weight_age = 1
+ struct damos_access_pattern pattern = {
+ /* Find regions having PAGE_SIZE or larger size */
+ .min_sz_region = PAGE_SIZE,
+ .max_sz_region = ULONG_MAX,
+ /* and not accessed at all */
+ .min_nr_accesses = 0,
+ .max_nr_accesses = 0,
+		/* for min_age or more microseconds */
+ .min_age_region = min_age /
+ damon_reclaim_mon_attrs.aggr_interval,
+ .max_age_region = UINT_MAX,
};
- struct damos *scheme = damon_new_scheme(
- /* Find regions having PAGE_SIZE or larger size */
- PAGE_SIZE, ULONG_MAX,
- /* and not accessed at all */
- 0, 0,
- /* for min_age or more micro-seconds, and */
- min_age / aggr_interval, UINT_MAX,
+
+ return damon_new_scheme(
+ &pattern,
/* page out those, as soon as found */
DAMOS_PAGEOUT,
/* under the quota. */
- &quota,
+ &damon_reclaim_quota,
/* (De)activate this according to the watermarks. */
- &wmarks);
-
- return scheme;
+ &damon_reclaim_wmarks);
}
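Note the unit conversion feeding .min_age_region above: min_age is a module
parameter in microseconds, while DAMON tracks region age in aggregation
intervals, hence the division by aggr_interval.  Assuming the module's
default min_age of 120 seconds and the 100 ms aggregation interval
configured above, the threshold works out to 120000000 / 100000 = 1200
aggregation intervals.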
static int damon_reclaim_apply_parameters(void)
{
struct damos *scheme;
- struct damon_addr_range addr_range;
int err = 0;
- err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
- min_nr_regions, max_nr_regions);
+ err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
if (err)
return err;
@@ -316,19 +154,11 @@ static int damon_reclaim_apply_parameters(void)
scheme = damon_reclaim_new_scheme();
if (!scheme)
return -ENOMEM;
- err = damon_set_schemes(ctx, &scheme, 1);
- if (err)
- return err;
+ damon_set_schemes(ctx, &scheme, 1);
- if (monitor_region_start > monitor_region_end)
- return -EINVAL;
- if (!monitor_region_start && !monitor_region_end &&
- !get_monitoring_region(&monitor_region_start,
- &monitor_region_end))
- return -EINVAL;
- addr_range.start = monitor_region_start;
- addr_range.end = monitor_region_end;
- return damon_set_regions(target, &addr_range, 1);
+ return damon_set_region_biggest_system_ram_default(target,
+ &monitor_region_start,
+ &monitor_region_end);
}
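The open-coded walk_system_ram_res() logic deleted above has an equivalent
in DAMON core: damon_set_region_biggest_system_ram_default() picks the
biggest 'System RAM' resource when monitor_region_start and
monitor_region_end are both zero, and otherwise applies (and validates) the
caller-supplied range, so the module-level behavior is unchanged.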
static int damon_reclaim_turn(bool on)
@@ -413,13 +243,8 @@ static int damon_reclaim_after_aggregation(struct damon_ctx *c)
struct damos *s;
/* update the stats parameter */
- damon_for_each_scheme(s, c) {
- nr_reclaim_tried_regions = s->stat.nr_tried;
- bytes_reclaim_tried_regions = s->stat.sz_tried;
- nr_reclaimed_regions = s->stat.nr_applied;
- bytes_reclaimed_regions = s->stat.sz_applied;
- nr_quota_exceeds = s->stat.qt_exceeds;
- }
+ damon_for_each_scheme(s, c)
+ damon_reclaim_stat = s->stat;
return damon_reclaim_handle_commit_inputs();
}
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index bdef9682d0a0..9f1219a67e3f 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -58,7 +58,7 @@ static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
err = kstrtoul(buf, 0, &min);
if (err)
- return -EINVAL;
+ return err;
range->min = min;
return count;
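Most of the sysfs changes in this file repeat one pattern: the store
handlers now propagate kstrtoul()'s actual return value instead of
flattening every failure to -EINVAL, so a caller can tell a malformed string
(-EINVAL) from a numerically out-of-range one (-ERANGE).  A generic sketch
of the resulting handler shape (example_param is a stand-in backing field):

#include <linux/kernel.h>
#include <linux/kobject.h>

static unsigned long example_param;

static ssize_t example_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	unsigned long val;
	int err = kstrtoul(buf, 0, &val);

	if (err)
		return err;	/* -EINVAL or -ERANGE, passed through */
	example_param = val;
	return count;
}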
@@ -83,7 +83,7 @@ static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
err = kstrtoul(buf, 0, &max);
if (err)
- return -EINVAL;
+ return err;
range->max = max;
return count;
@@ -291,9 +291,7 @@ static ssize_t interval_us_store(struct kobject *kobj,
struct damon_sysfs_watermarks, kobj);
int err = kstrtoul(buf, 0, &watermarks->interval_us);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t high_show(struct kobject *kobj,
@@ -312,9 +310,7 @@ static ssize_t high_store(struct kobject *kobj,
struct damon_sysfs_watermarks, kobj);
int err = kstrtoul(buf, 0, &watermarks->high);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t mid_show(struct kobject *kobj,
@@ -333,9 +329,7 @@ static ssize_t mid_store(struct kobject *kobj,
struct damon_sysfs_watermarks, kobj);
int err = kstrtoul(buf, 0, &watermarks->mid);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t low_show(struct kobject *kobj,
@@ -354,9 +348,7 @@ static ssize_t low_store(struct kobject *kobj,
struct damon_sysfs_watermarks, kobj);
int err = kstrtoul(buf, 0, &watermarks->low);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static void damon_sysfs_watermarks_release(struct kobject *kobj)
@@ -437,9 +429,7 @@ static ssize_t sz_permil_store(struct kobject *kobj,
struct damon_sysfs_weights, kobj);
int err = kstrtouint(buf, 0, &weights->sz);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t nr_accesses_permil_show(struct kobject *kobj,
@@ -458,9 +448,7 @@ static ssize_t nr_accesses_permil_store(struct kobject *kobj,
struct damon_sysfs_weights, kobj);
int err = kstrtouint(buf, 0, &weights->nr_accesses);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t age_permil_show(struct kobject *kobj,
@@ -479,9 +467,7 @@ static ssize_t age_permil_store(struct kobject *kobj,
struct damon_sysfs_weights, kobj);
int err = kstrtouint(buf, 0, &weights->age);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static void damon_sysfs_weights_release(struct kobject *kobj)
@@ -1031,8 +1017,7 @@ static ssize_t nr_schemes_show(struct kobject *kobj,
static ssize_t nr_schemes_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- struct damon_sysfs_schemes *schemes = container_of(kobj,
- struct damon_sysfs_schemes, kobj);
+ struct damon_sysfs_schemes *schemes;
int nr, err = kstrtoint(buf, 0, &nr);
if (err)
@@ -1040,6 +1025,8 @@ static ssize_t nr_schemes_store(struct kobject *kobj,
if (nr < 0)
return -EINVAL;
+ schemes = container_of(kobj, struct damon_sysfs_schemes, kobj);
+
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
err = damon_sysfs_schemes_add_dirs(schemes, nr);
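The other recurring cleanup moves container_of() past the input-validation
checks.  container_of() is pure pointer arithmetic (subtracting the member's
offset from the member pointer), so the motion is about doing no derivation
before the input is known to be good, not about safety.  A generic
illustration with a made-up struct:

#include <linux/kobject.h>

struct outer {
	int other_field;
	struct kobject kobj;	/* embedded member */
};

static struct outer *outer_of(struct kobject *kobj)
{
	/* Equivalent to:
	 * (struct outer *)((char *)kobj - offsetof(struct outer, kobj)) */
	return container_of(kobj, struct outer, kobj);
}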
@@ -1110,9 +1097,7 @@ static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
struct damon_sysfs_region, kobj);
int err = kstrtoul(buf, 0, &region->start);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -1131,9 +1116,7 @@ static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
struct damon_sysfs_region, kobj);
int err = kstrtoul(buf, 0, &region->end);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static void damon_sysfs_region_release(struct kobject *kobj)
@@ -1237,8 +1220,7 @@ static ssize_t nr_regions_show(struct kobject *kobj,
static ssize_t nr_regions_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- struct damon_sysfs_regions *regions = container_of(kobj,
- struct damon_sysfs_regions, kobj);
+ struct damon_sysfs_regions *regions;
int nr, err = kstrtoint(buf, 0, &nr);
if (err)
@@ -1246,6 +1228,8 @@ static ssize_t nr_regions_store(struct kobject *kobj,
if (nr < 0)
return -EINVAL;
+ regions = container_of(kobj, struct damon_sysfs_regions, kobj);
+
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
err = damon_sysfs_regions_add_dirs(regions, nr);
@@ -1440,8 +1424,7 @@ static ssize_t nr_targets_show(struct kobject *kobj,
static ssize_t nr_targets_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- struct damon_sysfs_targets *targets = container_of(kobj,
- struct damon_sysfs_targets, kobj);
+ struct damon_sysfs_targets *targets;
int nr, err = kstrtoint(buf, 0, &nr);
if (err)
@@ -1449,6 +1432,8 @@ static ssize_t nr_targets_store(struct kobject *kobj,
if (nr < 0)
return -EINVAL;
+ targets = container_of(kobj, struct damon_sysfs_targets, kobj);
+
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
err = damon_sysfs_targets_add_dirs(targets, nr);
@@ -1525,7 +1510,7 @@ static ssize_t sample_us_store(struct kobject *kobj,
int err = kstrtoul(buf, 0, &us);
if (err)
- return -EINVAL;
+ return err;
intervals->sample_us = us;
return count;
@@ -1549,7 +1534,7 @@ static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
int err = kstrtoul(buf, 0, &us);
if (err)
- return -EINVAL;
+ return err;
intervals->aggr_us = us;
return count;
@@ -1573,7 +1558,7 @@ static ssize_t update_us_store(struct kobject *kobj,
int err = kstrtoul(buf, 0, &us);
if (err)
- return -EINVAL;
+ return err;
intervals->update_us = us;
return count;
@@ -1962,8 +1947,7 @@ static ssize_t nr_contexts_show(struct kobject *kobj,
static ssize_t nr_contexts_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- struct damon_sysfs_contexts *contexts = container_of(kobj,
- struct damon_sysfs_contexts, kobj);
+ struct damon_sysfs_contexts *contexts;
int nr, err;
err = kstrtoint(buf, 0, &nr);
@@ -1973,6 +1957,7 @@ static ssize_t nr_contexts_store(struct kobject *kobj,
if (nr < 0 || 1 < nr)
return -EINVAL;
+ contexts = container_of(kobj, struct damon_sysfs_contexts, kobj);
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
err = damon_sysfs_contexts_add_dirs(contexts, nr);
@@ -2127,18 +2112,23 @@ static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
struct damon_sysfs_ul_range *sys_nr_regions =
sys_attrs->nr_regions_range;
-
- return damon_set_attrs(ctx, sys_intervals->sample_us,
- sys_intervals->aggr_us, sys_intervals->update_us,
- sys_nr_regions->min, sys_nr_regions->max);
+ struct damon_attrs attrs = {
+ .sample_interval = sys_intervals->sample_us,
+ .aggr_interval = sys_intervals->aggr_us,
+ .ops_update_interval = sys_intervals->update_us,
+ .min_nr_regions = sys_nr_regions->min,
+ .max_nr_regions = sys_nr_regions->max,
+ };
+ return damon_set_attrs(ctx, &attrs);
}
static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
{
struct damon_target *t, *next;
+ bool has_pid = damon_target_has_pid(ctx);
damon_for_each_target_safe(t, next, ctx) {
- if (damon_target_has_pid(ctx))
+ if (has_pid)
put_pid(t->pid);
damon_destroy_target(t);
}
@@ -2259,11 +2249,20 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
static struct damos *damon_sysfs_mk_scheme(
struct damon_sysfs_scheme *sysfs_scheme)
{
- struct damon_sysfs_access_pattern *pattern =
+ struct damon_sysfs_access_pattern *access_pattern =
sysfs_scheme->access_pattern;
struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
+
+ struct damos_access_pattern pattern = {
+ .min_sz_region = access_pattern->sz->min,
+ .max_sz_region = access_pattern->sz->max,
+ .min_nr_accesses = access_pattern->nr_accesses->min,
+ .max_nr_accesses = access_pattern->nr_accesses->max,
+ .min_age_region = access_pattern->age->min,
+ .max_age_region = access_pattern->age->max,
+ };
struct damos_quota quota = {
.ms = sysfs_quotas->ms,
.sz = sysfs_quotas->sz,
@@ -2280,10 +2279,8 @@ static struct damos *damon_sysfs_mk_scheme(
.low = sysfs_wmarks->low,
};
- return damon_new_scheme(pattern->sz->min, pattern->sz->max,
- pattern->nr_accesses->min, pattern->nr_accesses->max,
- pattern->age->min, pattern->age->max,
- sysfs_scheme->action, &quota, &wmarks);
+ return damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
+ &wmarks);
}
static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
@@ -2309,7 +2306,7 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
struct damon_target *t, *next;
- if (ctx->ops.id != DAMON_OPS_VADDR && ctx->ops.id != DAMON_OPS_FVADDR)
+ if (!damon_target_has_pid(ctx))
return;
mutex_lock(&ctx->kdamond_lock);
@@ -2455,8 +2452,7 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
struct damon_ctx *ctx;
int err;
- if (kdamond->damon_ctx &&
- damon_sysfs_ctx_running(kdamond->damon_ctx))
+ if (damon_sysfs_kdamond_running(kdamond))
return -EBUSY;
if (damon_sysfs_cmd_request.kdamond == kdamond)
return -EBUSY;
@@ -2579,19 +2575,16 @@ static ssize_t pid_show(struct kobject *kobj,
struct damon_sysfs_kdamond *kdamond = container_of(kobj,
struct damon_sysfs_kdamond, kobj);
struct damon_ctx *ctx;
- int pid;
+ int pid = -1;
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
ctx = kdamond->damon_ctx;
- if (!ctx) {
- pid = -1;
+ if (!ctx)
goto out;
- }
+
mutex_lock(&ctx->kdamond_lock);
- if (!ctx->kdamond)
- pid = -1;
- else
+ if (ctx->kdamond)
pid = ctx->kdamond->pid;
mutex_unlock(&ctx->kdamond_lock);
out:
@@ -2657,23 +2650,18 @@ static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
kdamonds->kdamonds_arr = NULL;
}
-static int damon_sysfs_nr_running_ctxs(struct damon_sysfs_kdamond **kdamonds,
+static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
int nr_kdamonds)
{
- int nr_running_ctxs = 0;
int i;
for (i = 0; i < nr_kdamonds; i++) {
- struct damon_ctx *ctx = kdamonds[i]->damon_ctx;
-
- if (!ctx)
- continue;
- mutex_lock(&ctx->kdamond_lock);
- if (ctx->kdamond)
- nr_running_ctxs++;
- mutex_unlock(&ctx->kdamond_lock);
+ if (damon_sysfs_kdamond_running(kdamonds[i]) ||
+ damon_sysfs_cmd_request.kdamond == kdamonds[i])
+ return true;
}
- return nr_running_ctxs;
+
+ return false;
}
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
@@ -2682,15 +2670,9 @@ static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
int err, i;
- if (damon_sysfs_nr_running_ctxs(kdamonds->kdamonds_arr, kdamonds->nr))
+ if (damon_sysfs_kdamonds_busy(kdamonds->kdamonds_arr, kdamonds->nr))
return -EBUSY;
- for (i = 0; i < kdamonds->nr; i++) {
- if (damon_sysfs_cmd_request.kdamond ==
- kdamonds->kdamonds_arr[i])
- return -EBUSY;
- }
-
damon_sysfs_kdamonds_rm_dirs(kdamonds);
if (!nr_kdamonds)
return 0;
@@ -2741,8 +2723,7 @@ static ssize_t nr_kdamonds_show(struct kobject *kobj,
static ssize_t nr_kdamonds_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
- struct damon_sysfs_kdamonds, kobj);
+ struct damon_sysfs_kdamonds *kdamonds;
int nr, err;
err = kstrtoint(buf, 0, &nr);
@@ -2751,6 +2732,8 @@ static ssize_t nr_kdamonds_store(struct kobject *kobj,
if (nr < 0)
return -EINVAL;
+ kdamonds = container_of(kobj, struct damon_sysfs_kdamonds, kobj);
+
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index d4f55f349100..bce37c487540 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -14,33 +14,19 @@
#include <kunit/test.h>
-static void __link_vmas(struct vm_area_struct *vmas, ssize_t nr_vmas)
+static void __link_vmas(struct maple_tree *mt, struct vm_area_struct *vmas,
+ ssize_t nr_vmas)
{
- int i, j;
- unsigned long largest_gap, gap;
+ int i;
+ MA_STATE(mas, mt, 0, 0);
if (!nr_vmas)
return;
- for (i = 0; i < nr_vmas - 1; i++) {
- vmas[i].vm_next = &vmas[i + 1];
-
- vmas[i].vm_rb.rb_left = NULL;
- vmas[i].vm_rb.rb_right = &vmas[i + 1].vm_rb;
-
- largest_gap = 0;
- for (j = i; j < nr_vmas; j++) {
- if (j == 0)
- continue;
- gap = vmas[j].vm_start - vmas[j - 1].vm_end;
- if (gap > largest_gap)
- largest_gap = gap;
- }
- vmas[i].rb_subtree_gap = largest_gap;
- }
- vmas[i].vm_next = NULL;
- vmas[i].vm_rb.rb_right = NULL;
- vmas[i].rb_subtree_gap = 0;
+ mas_lock(&mas);
+ for (i = 0; i < nr_vmas; i++)
+ vma_mas_store(&vmas[i], &mas);
+ mas_unlock(&mas);
}
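__link_vmas() now populates the test fixture by storing each VMA in the
maple tree under the tree lock, matching the mmap rework elsewhere in this
series.  For readers new to the structure, the general-purpose API has the
same store-a-range shape; a minimal sketch using the public mtree_*
interface (not the VMA-specific helpers above):

#include <linux/maple_tree.h>

static DEFINE_MTREE(example_tree);

static int store_example(void *entry, unsigned long first, unsigned long last)
{
	/* Stores 'entry' for the whole range [first, last]; allocates
	 * internal nodes with GFP_KERNEL and takes the tree lock itself. */
	return mtree_store_range(&example_tree, first, last, entry,
			GFP_KERNEL);
}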
/*
@@ -72,6 +58,7 @@ static void __link_vmas(struct vm_area_struct *vmas, ssize_t nr_vmas)
*/
static void damon_test_three_regions_in_vmas(struct kunit *test)
{
+ static struct mm_struct mm;
struct damon_addr_range regions[3] = {0,};
/* 10-20-25, 200-210-220, 300-305, 307-330 */
struct vm_area_struct vmas[] = {
@@ -83,9 +70,10 @@ static void damon_test_three_regions_in_vmas(struct kunit *test)
(struct vm_area_struct) {.vm_start = 307, .vm_end = 330},
};
- __link_vmas(vmas, 6);
+ mt_init_flags(&mm.mm_mt, MM_MT_FLAGS);
+ __link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas));
- __damon_va_three_regions(&vmas[0], regions);
+ __damon_va_three_regions(&mm, regions);
KUNIT_EXPECT_EQ(test, 10ul, regions[0].start);
KUNIT_EXPECT_EQ(test, 25ul, regions[0].end);
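Worked through with the fixture above: the six VMAs cover 10-20, 20-25,
200-210, 210-220, 300-305, and 307-330, so the inter-VMA gaps are 175 (25 to
200), 80 (220 to 300), and 2 (305 to 307).  Excluding the two biggest gaps
splits the space into 10-25, 200-220, and 300-330, consistent with the
assertions here (modulo DAMON_MIN_REGION alignment).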
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 3c7b9d6dca95..ea94e0b2c311 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -113,37 +113,38 @@ static unsigned long sz_range(struct damon_addr_range *r)
*
 * Returns 0 on success, or a negative error code otherwise.
*/
-static int __damon_va_three_regions(struct vm_area_struct *vma,
+static int __damon_va_three_regions(struct mm_struct *mm,
struct damon_addr_range regions[3])
{
- struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
- struct vm_area_struct *last_vma = NULL;
- unsigned long start = 0;
- struct rb_root rbroot;
-
- /* Find two biggest gaps so that first_gap > second_gap > others */
- for (; vma; vma = vma->vm_next) {
- if (!last_vma) {
- start = vma->vm_start;
- goto next;
- }
+ struct damon_addr_range first_gap = {0}, second_gap = {0};
+ VMA_ITERATOR(vmi, mm, 0);
+ struct vm_area_struct *vma, *prev = NULL;
+ unsigned long start;
- if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
- rbroot.rb_node = &vma->vm_rb;
- vma = rb_entry(rb_last(&rbroot),
- struct vm_area_struct, vm_rb);
+ /*
+ * Find the two biggest gaps so that first_gap > second_gap > others.
+ * If this is too slow, it can be optimised to examine the maple
+ * tree gaps.
+ */
+ for_each_vma(vmi, vma) {
+ unsigned long gap;
+
+ if (!prev) {
+ start = vma->vm_start;
goto next;
}
-
- gap.start = last_vma->vm_end;
- gap.end = vma->vm_start;
- if (sz_range(&gap) > sz_range(&second_gap)) {
- swap(gap, second_gap);
- if (sz_range(&second_gap) > sz_range(&first_gap))
- swap(second_gap, first_gap);
+ gap = vma->vm_start - prev->vm_end;
+
+ if (gap > sz_range(&first_gap)) {
+ second_gap = first_gap;
+ first_gap.start = prev->vm_end;
+ first_gap.end = vma->vm_start;
+ } else if (gap > sz_range(&second_gap)) {
+ second_gap.start = prev->vm_end;
+ second_gap.end = vma->vm_start;
}
next:
- last_vma = vma;
+ prev = vma;
}
if (!sz_range(&second_gap) || !sz_range(&first_gap))
@@ -159,7 +160,7 @@ next:
regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
- regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);
+ regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);
return 0;
}
@@ -180,7 +181,7 @@ static int damon_va_three_regions(struct damon_target *t,
return -EINVAL;
mmap_read_lock(mm);
- rc = __damon_va_three_regions(mm->mmap, regions);
+ rc = __damon_va_three_regions(mm, regions);
mmap_read_unlock(mm);
mmput(mm);
@@ -250,8 +251,8 @@ static void __damon_va_init_regions(struct damon_ctx *ctx,
for (i = 0; i < 3; i++)
sz += regions[i].end - regions[i].start;
- if (ctx->min_nr_regions)
- sz /= ctx->min_nr_regions;
+ if (ctx->attrs.min_nr_regions)
+ sz /= ctx->attrs.min_nr_regions;
if (sz < DAMON_MIN_REGION)
sz = DAMON_MIN_REGION;
@@ -302,9 +303,14 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
pte_t *pte;
spinlock_t *ptl;
- if (pmd_huge(*pmd)) {
+ if (pmd_trans_huge(*pmd)) {
ptl = pmd_lock(walk->mm, pmd);
- if (pmd_huge(*pmd)) {
+ if (!pmd_present(*pmd)) {
+ spin_unlock(ptl);
+ return 0;
+ }
+
+ if (pmd_trans_huge(*pmd)) {
damon_pmdp_mkold(pmd, walk->mm, addr);
spin_unlock(ptl);
return 0;
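Two related fixes in this page-table walker: pmd_huge() is an
architecture-dependent test that can also match non-present huge entries, so
it is replaced by pmd_trans_huge(), the check appropriate for the THP
mappings this code actually handles; and the new pmd_present() test under
the PMD lock bails out on non-present entries (such as PMD migration
entries), which must not be dereferenced as if they mapped a page.  The same
pattern is applied to damon_young_pmd_entry() below.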
@@ -391,8 +397,8 @@ static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
* Functions for the access checking of the regions
*/
-static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
- struct mm_struct *mm, struct damon_region *r)
+static void __damon_va_prepare_access_check(struct mm_struct *mm,
+ struct damon_region *r)
{
r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
@@ -410,7 +416,7 @@ static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
if (!mm)
continue;
damon_for_each_region(r, t)
- __damon_va_prepare_access_check(ctx, mm, r);
+ __damon_va_prepare_access_check(mm, r);
mmput(mm);
}
}
@@ -429,9 +435,14 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
struct damon_young_walk_private *priv = walk->private;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (pmd_huge(*pmd)) {
+ if (pmd_trans_huge(*pmd)) {
ptl = pmd_lock(walk->mm, pmd);
- if (!pmd_huge(*pmd)) {
+ if (!pmd_present(*pmd)) {
+ spin_unlock(ptl);
+ return 0;
+ }
+
+ if (!pmd_trans_huge(*pmd)) {
spin_unlock(ptl);
goto regular_page;
}
@@ -532,16 +543,15 @@ static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
* mm 'mm_struct' for the given virtual address space
* r the region to be checked
*/
-static void __damon_va_check_access(struct damon_ctx *ctx,
- struct mm_struct *mm, struct damon_region *r)
+static void __damon_va_check_access(struct mm_struct *mm,
+ struct damon_region *r, bool same_target)
{
- static struct mm_struct *last_mm;
static unsigned long last_addr;
static unsigned long last_page_sz = PAGE_SIZE;
static bool last_accessed;
/* If the region is in the last checked page, reuse the result */
- if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
+ if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
if (last_accessed)
r->nr_accesses++;
@@ -552,7 +562,6 @@ static void __damon_va_check_access(struct damon_ctx *ctx,
if (last_accessed)
r->nr_accesses++;
- last_mm = mm;
last_addr = r->sampling_addr;
}
@@ -562,14 +571,17 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
struct mm_struct *mm;
struct damon_region *r;
unsigned int max_nr_accesses = 0;
+ bool same_target;
damon_for_each_target(t, ctx) {
mm = damon_get_mm(t);
if (!mm)
continue;
+ same_target = false;
damon_for_each_region(r, t) {
- __damon_va_check_access(ctx, mm, r);
+ __damon_va_check_access(mm, r, same_target);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
+ same_target = true;
}
mmput(mm);
}
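The static last_mm pointer comparison is replaced by an explicit same_target
flag, false for the first region of each target and true for the rest, so
the "same page as last time" cache is only ever reused within a single
target's address space rather than keyed on a raw mm pointer.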
@@ -581,9 +593,8 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
* Functions for the target validity check and cleanup
*/
-static bool damon_va_target_valid(void *target)
+static bool damon_va_target_valid(struct damon_target *t)
{
- struct damon_target *t = target;
struct task_struct *task;
task = damon_get_task_struct(t);
@@ -646,6 +657,9 @@ static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
case DAMOS_STAT:
return 0;
default:
+ /*
+ * DAMOS actions that are not yet supported by 'vaddr'.
+ */
return 0;
}
@@ -659,7 +673,7 @@ static int damon_va_scheme_score(struct damon_ctx *context,
switch (scheme->action) {
case DAMOS_PAGEOUT:
- return damon_pageout_score(context, r, scheme);
+ return damon_cold_score(context, r, scheme);
default:
break;
}