Re: [PATCH v27 2/6] fs/proc/task_mmu: Implement IOCTL to get and optionally clear info about PTEs

From: Andrei Vagin
Date: Tue Aug 08 2023 - 15:24:45 EST


On Tue, Aug 8, 2023 at 3:43 AM Muhammad Usama Anjum
<usama.anjum@xxxxxxxxxxxxx> wrote:

....

> +static int pagemap_scan_output(unsigned long categories,
> +			       struct pagemap_scan_private *p,
> +			       unsigned long addr, unsigned long *end)
> +{
> +	unsigned long n_pages, total_pages;
> +	int ret = 0;
> +
> +	if (!p->vec_buf)
> +		return 0;
> +
> +	categories &= p->arg.return_mask;
> +
> +	n_pages = (*end - addr) / PAGE_SIZE;
> +	if (check_add_overflow(p->found_pages, n_pages, &total_pages) || //TODO

This TODO needs to be resolved.

> +	    total_pages > p->arg.max_pages) {
> +		size_t n_too_much = total_pages - p->arg.max_pages;
> +		*end -= n_too_much * PAGE_SIZE;
> +		n_pages -= n_too_much;
> +		ret = -ENOSPC;
> +	}
> +
> +	if (!pagemap_scan_push_range(categories, p, addr, *end)) {
> +		*end = addr;
> +		n_pages = 0;
> +		ret = -ENOSPC;
> +	}
> +
> +	p->found_pages += n_pages;
> +	if (ret)
> +		p->walk_end_addr = *end;
> +
> +	return ret;
> +}
> +

...

> +static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
> +{
> +	struct mmu_notifier_range range;
> +	struct pagemap_scan_private p;
> +	unsigned long walk_start;
> +	size_t n_ranges_out = 0;
> +	int ret;
> +
> +	memset(&p, 0, sizeof(p));
> +	ret = pagemap_scan_get_args(&p.arg, uarg);
> +	if (ret)
> +		return ret;
> +
> +	p.masks_of_interest = MASKS_OF_INTEREST(p.arg);
> +	ret = pagemap_scan_init_bounce_buffer(&p);
> +	if (ret)
> +		return ret;
> +
> +	/* Protection change for the range is going to happen. */
> +	if (p.arg.flags & PM_SCAN_WP_MATCHING) {
> +		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
> +					 mm, p.arg.start, p.arg.end);
> +		mmu_notifier_invalidate_range_start(&range);
> +	}
> +
> +	walk_start = p.arg.start;
> +	for (; walk_start < p.arg.end; walk_start = p.arg.walk_end) {
> +		int n_out;
> +
> +		if (fatal_signal_pending(current)) {
> +			ret = -EINTR;
> +			break;
> +		}
> +
> +		ret = mmap_read_lock_killable(mm);
> +		if (ret)
> +			break;
> +		ret = walk_page_range(mm, walk_start, p.arg.end,
> +				      &pagemap_scan_ops, &p);
> +		mmap_read_unlock(mm);
> +
> +		n_out = pagemap_scan_flush_buffer(&p);
> +		if (n_out < 0)
> +			ret = n_out;
> +		else
> +			n_ranges_out += n_out;
> +
> +		if (ret != -ENOSPC || p.arg.vec_len - 1 == 0 ||
> +		    p.found_pages == p.arg.max_pages) {
> +			p.walk_end_addr = p.arg.end;

You should not change p.walk_end_addr if ret is -ENOSPC. Please add a
test case to cover this (rough sketch at the end of this mail).
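
I.e. something along these lines (untested sketch, keeping your variable
names): force the address only when the walk actually completed, and
leave the value that pagemap_scan_output() recorded when it stopped with
-ENOSPC:

		if (!ret)
			p.walk_end_addr = p.arg.end;

		if (ret != -ENOSPC || p.arg.vec_len - 1 == 0 ||
		    p.found_pages == p.arg.max_pages)
			break;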

> +			break;
> +		}
> +	}
> +
> +	if (p.cur_buf.start != p.cur_buf.end) {
> +		if (copy_to_user(p.vec_out, &p.cur_buf, sizeof(p.cur_buf)))
> +			ret = -EFAULT;
> +		else
> +			++n_ranges_out;
> +	}
> +
> +	/* ENOSPC signifies early stop (buffer full) from the walk. */
> +	if (!ret || ret == -ENOSPC)
> +		ret = n_ranges_out;
> +
> +	p.arg.walk_end = p.walk_end_addr ? p.walk_end_addr : walk_start;
> +	if (pagemap_scan_writeback_args(&p.arg, uarg))
> +		ret = -EFAULT;
> +
> +	if (p.arg.flags & PM_SCAN_WP_MATCHING)
> +		mmu_notifier_invalidate_range_end(&range);
> +
> +	kfree(p.vec_buf);
> +	return ret;
> +}
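
As for the test case mentioned above, here is a rough sketch of what I
would expect it to check (untested, error handling omitted; I am assuming
the struct pm_scan_arg / struct page_region layout and the PAGEMAP_SCAN
and PAGE_IS_PRESENT names from this series). With max_pages smaller than
the number of populated pages, the walk_end reported back to userspace
has to point at start + max_pages * page_size, not at the end of the
requested range:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	int nr_pages = 16, max_pages = 4;
	struct page_region vec[8];
	struct pm_scan_arg arg = { 0 };
	char *mem;
	long ret;
	int fd;

	mem = mmap(NULL, nr_pages * psize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* Fault the pages in so they all match PAGE_IS_PRESENT. */
	memset(mem, 1, nr_pages * psize);

	fd = open("/proc/self/pagemap", O_RDONLY);

	arg.size = sizeof(arg);
	arg.start = (unsigned long)mem;
	arg.end = (unsigned long)mem + nr_pages * psize;
	arg.vec = (unsigned long)vec;
	arg.vec_len = 8;
	arg.max_pages = max_pages;
	arg.category_mask = PAGE_IS_PRESENT;
	arg.return_mask = PAGE_IS_PRESENT;

	ret = ioctl(fd, PAGEMAP_SCAN, &arg);

	/*
	 * The scan must stop after max_pages pages and walk_end must
	 * report where it stopped, not the end of the requested range.
	 */
	printf("ret %ld, walk_end 0x%llx, expected 0x%llx\n", ret,
	       (unsigned long long)arg.walk_end,
	       (unsigned long long)(arg.start + max_pages * psize));

	close(fd);
	return 0;
}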

Thanks,
Andrei