[RFC PATCH 4/4] mm/damon: Make access check configurable

From: SeongJae Park
Date: Thu Apr 09 2020 - 05:44:38 EST


From: SeongJae Park <sjpark@xxxxxxxxx>

DAMON assumes the target regions are in virtual address space and
therefore checks the PTE Accessed bits to detect accesses to them.
However, some users might want to use architecture-specific access
checking features that are more accurate and lightweight. Also, some
users might want to apply DAMON to other address spaces such as the
physical address space, which would require a different access checking
mechanism.

This commit makes the access check functions configurable so that DAMON
users can replace them with their own implementations.
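
As an example, a monitoring context for another address space could be
set up as below. This is only an illustrative sketch: my_phys_prepare()
and my_phys_check() are hypothetical names for primitives a physical
address space monitor might provide, not functions that this patch
adds.

    /* Illustrative sketch only; these primitives are hypothetical. */
    static void my_phys_prepare(struct damon_ctx *ctx)
    {
    	/* arm access detection for each region before the sampling sleep */
    }

    static unsigned int my_phys_check(struct damon_ctx *ctx)
    {
    	/* update nr_accesses of each region; return the maximum seen */
    	return 0;
    }

    static struct damon_ctx my_ctx = {
    	/* sampling/aggregation intervals and region limits as usual */
    	.prepare_access_checks = my_phys_prepare,
    	.check_accesses = my_phys_check,
    };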

Signed-off-by: SeongJae Park <sjpark@xxxxxxxxx>
---
 include/linux/damon.h |  2 ++
 mm/damon.c            | 22 +++++++++++++---------
 2 files changed, 15 insertions(+), 9 deletions(-)
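
Note: the two callbacks split the per-sample work of the monitoring
thread. prepare_access_checks() arms the detection mechanism for each
target region before the sampling sleep, and check_accesses() harvests
the results afterwards, updating nr_accesses of each region and
returning the maximum value, which the aggregation logic uses as a
merge threshold. A condensed sketch of the calling contract,
paraphrasing the kdamond_fn() hunk at the end of this patch:

    /* paraphrased from the kdamond_fn() hunk below; not literal code */
    while (!kdamond_need_stop(ctx)) {
    	ctx->prepare_access_checks(ctx);
    	usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
    	max_nr_accesses = ctx->check_accesses(ctx);
    	/* ... merge/split regions using max_nr_accesses ... */
    }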

diff --git a/include/linux/damon.h b/include/linux/damon.h
index a051b5d966ed..188d5b89b303 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -95,6 +95,8 @@ struct damon_ctx {
 	/* callbacks */
 	void (*init_target_regions)(struct damon_ctx *context);
 	void (*update_target_regions)(struct damon_ctx *context);
+	void (*prepare_access_checks)(struct damon_ctx *context);
+	unsigned int (*check_accesses)(struct damon_ctx *context);
 	void (*sample_cb)(struct damon_ctx *context);
 	void (*aggregate_cb)(struct damon_ctx *context);
 };
diff --git a/mm/damon.c b/mm/damon.c
index da0e7efdf1e1..20a66a6307d1 100644
--- a/mm/damon.c
+++ b/mm/damon.c
@@ -59,6 +59,8 @@
 
 static void kdamond_init_vm_regions(struct damon_ctx *ctx);
 static void kdamond_update_vm_regions(struct damon_ctx *ctx);
+static void kdamond_prepare_vm_access_checks(struct damon_ctx *ctx);
+static unsigned int kdamond_check_vm_accesses(struct damon_ctx *ctx);
 
 /* A monitoring context for debugfs interface users. */
 static struct damon_ctx damon_user_ctx = {
@@ -70,6 +72,8 @@ static struct damon_ctx damon_user_ctx = {
 
 	.init_target_regions = kdamond_init_vm_regions,
 	.update_target_regions = kdamond_update_vm_regions,
+	.prepare_access_checks = kdamond_prepare_vm_access_checks,
+	.check_accesses = kdamond_check_vm_accesses,
 };
 
 /*
@@ -506,7 +510,7 @@ static void damon_mkold(struct mm_struct *mm, unsigned long addr)
 #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
 }
 
-static void damon_prepare_access_check(struct damon_ctx *ctx,
+static void damon_prepare_vm_access_check(struct damon_ctx *ctx,
 		struct mm_struct *mm, struct damon_region *r)
 {
 	r->sampling_addr = damon_rand(ctx, r->ar.start, r->ar.end);
@@ -514,7 +518,7 @@ static void damon_prepare_access_check(struct damon_ctx *ctx,
 	damon_mkold(mm, r->sampling_addr);
 }
 
-static void kdamond_prepare_access_checks(struct damon_ctx *ctx)
+static void kdamond_prepare_vm_access_checks(struct damon_ctx *ctx)
 {
 	struct damon_task *t;
 	struct mm_struct *mm;
@@ -525,7 +529,7 @@ static void kdamond_prepare_access_checks(struct damon_ctx *ctx)
 		if (!mm)
 			continue;
 		damon_for_each_region(r, t)
-			damon_prepare_access_check(ctx, mm, r);
+			damon_prepare_vm_access_check(ctx, mm, r);
 		mmput(mm);
 	}
 }
@@ -563,7 +567,7 @@ static bool damon_young(struct mm_struct *mm, unsigned long addr,
  * mm	'mm_struct' for the given virtual address space
  * r	the region to be checked
  */
-static void damon_check_access(struct damon_ctx *ctx,
+static void damon_check_vm_access(struct damon_ctx *ctx,
 		struct mm_struct *mm, struct damon_region *r)
 {
 	static struct mm_struct *last_mm;
@@ -587,7 +591,7 @@ static void damon_check_access(struct damon_ctx *ctx,
 	last_addr = r->sampling_addr;
 }
 
-static unsigned int kdamond_check_accesses(struct damon_ctx *ctx)
+static unsigned int kdamond_check_vm_accesses(struct damon_ctx *ctx)
 {
 	struct damon_task *t;
 	struct mm_struct *mm;
@@ -599,12 +603,12 @@ static unsigned int kdamond_check_accesses(struct damon_ctx *ctx)
 		if (!mm)
 			continue;
 		damon_for_each_region(r, t) {
-			damon_check_access(ctx, mm, r);
+			damon_check_vm_access(ctx, mm, r);
 			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
 		}
-
 		mmput(mm);
 	}
+
 	return max_nr_accesses;
 }
 
@@ -1134,13 +1138,13 @@ static int kdamond_fn(void *data)
 	pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
 	ctx->init_target_regions(ctx);
 	while (!kdamond_need_stop(ctx)) {
-		kdamond_prepare_access_checks(ctx);
+		ctx->prepare_access_checks(ctx);
 		if (ctx->sample_cb)
 			ctx->sample_cb(ctx);
 
 		usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
 
-		max_nr_accesses = kdamond_check_accesses(ctx);
+		max_nr_accesses = ctx->check_accesses(ctx);
 
 		if (kdamond_aggregate_interval_passed(ctx)) {
 			kdamond_merge_regions(ctx, max_nr_accesses / 10);
--
2.17.1