Re: [PATCH v3 2/3] mm, page_owner: decouple freeing stack trace from debug_pagealloc

From: Qian Cai
Date: Mon Oct 07 2019 - 06:41:56 EST

> On Oct 7, 2019, at 5:18 AM, Vlastimil Babka <vbabka@xxxxxxx> wrote:
>
> Commit 8974558f49a6 ("mm, page_owner, debug_pagealloc: save and dump
> freeing stack trace") enhanced page_owner to also store the freeing
> stack trace when debug_pagealloc is enabled. KASAN would like to do the
> same [1] to improve its error reports, e.g. when debugging UAF issues.
> Kirill suggested that saving the freeing stack trace should also be
> possible to enable separately from KASAN or debug_pagealloc, i.e. via
> an extra boot option. Qian argued that we have enough options already
> and that, for a debugging option, avoiding the extra overhead is not
> worth the complications. Kirill noted that the extra stack handle in
> struct page_owner requires 0.1% of memory.
>
> This patch therefore enables saving the freeing stack trace whenever
> page_owner is enabled, regardless of whether debug_pagealloc or KASAN
> is also enabled. KASAN kernels booted with page_owner=on will thus
> benefit from the improved error reports.
>
> [1] https://bugzilla.kernel.org/show_bug.cgi?id=203967
>
> Suggested-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
> Suggested-by: Walter Wu <walter-zh.wu@xxxxxxxxxxxx>
> Suggested-by: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
> Suggested-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
> Suggested-by: Qian Cai <cai@xxxxxx>
> Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
> ---
> Documentation/dev-tools/kasan.rst | 3 +++
> mm/page_owner.c | 28 +++++++---------------------
> 2 files changed, 10 insertions(+), 21 deletions(-)

The diffstat looks nice!
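
For the record, the 0.1% figure checks out as a rough estimate: assuming
depot_stack_handle_t stays a 4-byte handle, the extra free_handle costs one
u32 per 4 KiB base page, i.e. 4 / 4096 = ~0.1% of memory on top of what
page_owner already consumes.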

Reviewed-by: Qian Cai <cai@xxxxxx>

>
> diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
> index b72d07d70239..525296121d89 100644
> --- a/Documentation/dev-tools/kasan.rst
> +++ b/Documentation/dev-tools/kasan.rst
> @@ -41,6 +41,9 @@ smaller binary while the latter is 1.1 - 2 times faster.
> Both KASAN modes work with both SLUB and SLAB memory allocators.
> For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
>
> +To augment reports with last allocation and freeing stack of the physical page,
> +it is recommended to enable also CONFIG_PAGE_OWNER and boot with page_owner=on.
> +
> To disable instrumentation for specific files or directories, add a line
> similar to the following to the respective kernel Makefile:
>
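
The documentation bit looks good too. For completeness, this is roughly
what the recommendation amounts to in a test setup (a minimal sketch; the
CONFIG_KASAN_GENERIC line is just the KASAN mode I happen to use, the
page_owner part is the same for both modes):

    CONFIG_KASAN=y
    CONFIG_KASAN_GENERIC=y
    CONFIG_PAGE_OWNER=y

plus page_owner=on appended to the kernel command line.
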
> diff --git a/mm/page_owner.c b/mm/page_owner.c
> index d3cf5d336ccf..de1916ac3e24 100644
> --- a/mm/page_owner.c
> +++ b/mm/page_owner.c
> @@ -24,12 +24,10 @@ struct page_owner {
> short last_migrate_reason;
> gfp_t gfp_mask;
> depot_stack_handle_t handle;
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> depot_stack_handle_t free_handle;
> -#endif
> };
>
> -static bool page_owner_disabled = true;
> +static bool page_owner_enabled = false;
> DEFINE_STATIC_KEY_FALSE(page_owner_inited);
>
> static depot_stack_handle_t dummy_handle;
> @@ -44,7 +42,7 @@ static int __init early_page_owner_param(char *buf)
> return -EINVAL;
>
> if (strcmp(buf, "on") == 0)
> - page_owner_disabled = false;
> + page_owner_enabled = true;
>
> return 0;
> }
> @@ -52,10 +50,7 @@ early_param("page_owner", early_page_owner_param);
>
> static bool need_page_owner(void)
> {
> - if (page_owner_disabled)
> - return false;
> -
> - return true;
> + return page_owner_enabled;
> }
>
> static __always_inline depot_stack_handle_t create_dummy_stack(void)
> @@ -84,7 +79,7 @@ static noinline void register_early_stack(void)
>
> static void init_page_owner(void)
> {
> - if (page_owner_disabled)
> + if (!page_owner_enabled)
> return;
>
> register_dummy_stack();
> @@ -148,25 +143,18 @@ void __reset_page_owner(struct page *page, unsigned int order)
> {
> int i;
> struct page_ext *page_ext;
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> depot_stack_handle_t handle = 0;
> struct page_owner *page_owner;
>
> - if (debug_pagealloc_enabled())
> - handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
> -#endif
> + handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
>
> page_ext = lookup_page_ext(page);
> if (unlikely(!page_ext))
> return;
> for (i = 0; i < (1 << order); i++) {
> __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> - if (debug_pagealloc_enabled()) {
> - page_owner = get_page_owner(page_ext);
> - page_owner->free_handle = handle;
> - }
> -#endif
> + page_owner = get_page_owner(page_ext);
> + page_owner->free_handle = handle;
> page_ext = page_ext_next(page_ext);
> }
> }
> @@ -450,7 +438,6 @@ void __dump_page_owner(struct page *page)
> stack_trace_print(entries, nr_entries, 0);
> }
>
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> handle = READ_ONCE(page_owner->free_handle);
> if (!handle) {
> pr_alert("page_owner free stack trace missing\n");
> @@ -459,7 +446,6 @@ void __dump_page_owner(struct page *page)
> pr_alert("page last free stack trace:\n");
> stack_trace_print(entries, nr_entries, 0);
> }
> -#endif
>
> if (page_owner->last_migrate_reason != -1)
> pr_alert("page has been migrated, last migrate reason: %s\n",
> --
> 2.23.0
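
As an aside, for anyone wondering how a single 4-byte handle can reproduce
a whole call trace: the depot round-trip behind save_stack() and
__dump_page_owner() is roughly the following (a simplified sketch, not the
exact page_owner code; the skip count and the entries array size are
arbitrary here):

    /* needs <linux/stacktrace.h> and <linux/stackdepot.h> */
    unsigned long entries[16];      /* raw trace captured on this CPU */
    unsigned long *saved;           /* trace as stored in the stack depot */
    depot_stack_handle_t handle;
    unsigned int nr;

    /* on free: capture the current stack and deduplicate it in the depot */
    nr = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
    handle = stack_depot_save(entries, nr, GFP_NOWAIT | __GFP_NOWARN);

    /* on dump: turn the handle back into the full trace and print it */
    nr = stack_depot_fetch(handle, &saved);
    stack_trace_print(saved, nr, 0);

Identical traces deduplicate to the same handle, which is part of why
storing one more handle per page stays this cheap.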