Re: [PATCH v20 2/5] fs/proc/task_mmu: Implement IOCTL to get and optionally clear info about PTEs

From: kernel test robot
Date: Fri Jun 23 2023 - 19:04:52 EST


Hi Muhammad,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on next-20230623]
[cannot apply to linus/master v6.4-rc7]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
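
For example, git can record the base tree information automatically when the
branch has an upstream configured (the reroll count and output directory below
are placeholders, not taken from this series):

    git format-patch --base=auto -v21 -o outgoing/ @{upstream}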

url: https://github.com/intel-lab-lkp/linux/commits/Muhammad-Usama-Anjum/userfaultfd-UFFD_FEATURE_WP_ASYNC/20230621-171253
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20230621072404.2918101-3-usama.anjum%40collabora.com
patch subject: [PATCH v20 2/5] fs/proc/task_mmu: Implement IOCTL to get and optionally clear info about PTEs
config: s390-randconfig-s043-20230622 (https://download.01.org/0day-ci/archive/20230624/202306240610.V4FYUL8b-lkp@xxxxxxxxx/config)
compiler: s390-linux-gcc (GCC) 12.3.0
reproduce: (https://download.01.org/0day-ci/archive/20230624/202306240610.V4FYUL8b-lkp@xxxxxxxxx/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@xxxxxxxxx>
| Closes: https://lore.kernel.org/oe-kbuild-all/202306240610.V4FYUL8b-lkp@xxxxxxxxx/

sparse warnings: (new ones prefixed by >>)
>> fs/proc/task_mmu.c:2209:13: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct page_region [noderef] __user *vec @@ got struct page_region * @@
fs/proc/task_mmu.c:2209:13: sparse: expected struct page_region [noderef] __user *vec
fs/proc/task_mmu.c:2209:13: sparse: got struct page_region *

vim +2209 fs/proc/task_mmu.c

  2193
  2194  static long do_pagemap_scan(struct mm_struct *mm, unsigned long __arg)
  2195  {
  2196          struct pm_scan_arg __user *uarg = (struct pm_scan_arg __user *)__arg;
  2197          unsigned long long start, end, walk_start, walk_end;
  2198          unsigned long long empty_slots, vec_index = 0;
  2199          struct mmu_notifier_range range;
  2200          struct page_region __user *vec;
  2201          struct pagemap_scan_private p;
  2202          struct pm_scan_arg arg;
  2203          int ret = 0;
  2204
  2205          if (copy_from_user(&arg, uarg, sizeof(arg)))
  2206                  return -EFAULT;
  2207
  2208          start = untagged_addr((unsigned long)arg.start);
> 2209          vec = (struct page_region *)untagged_addr((unsigned long)arg.vec);
  2210
  2211          ret = pagemap_scan_args_valid(&arg, start, vec);
  2212          if (ret)
  2213                  return ret;
  2214
  2215          end = start + arg.len;
  2216          p.max_pages = arg.max_pages;
  2217          p.found_pages = 0;
  2218          p.required_mask = arg.required_mask;
  2219          p.anyof_mask = arg.anyof_mask;
  2220          p.excluded_mask = arg.excluded_mask;
  2221          p.return_mask = arg.return_mask;
  2222          p.flags = arg.flags;
  2223          p.flags |= ((p.required_mask | p.anyof_mask | p.excluded_mask) &
  2224                      PAGE_IS_WRITTEN) ? PM_SCAN_REQUIRE_UFFD : 0;
  2225          p.cur_buf.start = p.cur_buf.len = p.cur_buf.flags = 0;
  2226          p.vec_buf = NULL;
  2227          p.vec_buf_len = PAGEMAP_WALK_SIZE >> PAGE_SHIFT;
  2228
  2229          /*
  2230           * Allocate smaller buffer to get output from inside the page walk
  2231           * functions and walk page range in PAGEMAP_WALK_SIZE size chunks. As
  2232           * we want to return output to user in compact form where no two
  2233           * consecutive regions should be continuous and have the same flags.
  2234           * So store the latest element in p.cur_buf between different walks and
  2235           * store the p.cur_buf at the end of the walk to the user buffer.
  2236           */
  2237          if (IS_PM_SCAN_GET(p.flags)) {
  2238                  p.vec_buf = kmalloc_array(p.vec_buf_len, sizeof(*p.vec_buf),
  2239                                            GFP_KERNEL);
  2240                  if (!p.vec_buf)
  2241                          return -ENOMEM;
  2242          }
  2243
  2244          if (IS_PM_SCAN_WP(p.flags)) {
  2245                  mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
  2246                                          mm, start, end);
  2247                  mmu_notifier_invalidate_range_start(&range);
  2248          }
  2249
  2250          walk_start = walk_end = start;
  2251          while (walk_end < end && !ret) {
  2252                  if (IS_PM_SCAN_GET(p.flags)) {
  2253                          p.vec_buf_index = 0;
  2254
  2255                          /*
  2256                           * All data is copied to cur_buf first. When more data
  2257                           * is found, we push cur_buf to vec_buf and copy new
  2258                           * data to cur_buf. Subtract 1 from length as the
  2259                           * index of cur_buf isn't counted in length.
  2260                           */
  2261                          empty_slots = arg.vec_len - vec_index;
  2262                          p.vec_buf_len = min(p.vec_buf_len, empty_slots - 1);
  2263                  }
  2264
  2265                  walk_end = (walk_start + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
  2266                  if (walk_end > end)
  2267                          walk_end = end;
  2268
  2269                  ret = mmap_read_lock_killable(mm);
  2270                  if (ret)
  2271                          goto free_data;
  2272                  ret = walk_page_range(mm, walk_start, walk_end,
  2273                                        &pagemap_scan_ops, &p);
  2274                  mmap_read_unlock(mm);
  2275
  2276                  if (ret && ret != PM_SCAN_BUFFER_FULL &&
  2277                      ret != PM_SCAN_FOUND_MAX_PAGES)
  2278                          goto free_data;
  2279
  2280                  walk_start = walk_end;
  2281                  if (IS_PM_SCAN_GET(p.flags) && p.vec_buf_index) {
  2282                          if (copy_to_user(&vec[vec_index], p.vec_buf,
  2283                                           p.vec_buf_index * sizeof(*p.vec_buf))) {
  2284                                  /*
  2285                                   * Return error even though the OP succeeded
  2286                                   */
  2287                                  ret = -EFAULT;
  2288                                  goto free_data;
  2289                          }
  2290                          vec_index += p.vec_buf_index;
  2291                  }
  2292          }
  2293
  2294          if (p.cur_buf.len) {
  2295                  if (copy_to_user(&vec[vec_index], &p.cur_buf, sizeof(p.cur_buf))) {
  2296                          ret = -EFAULT;
  2297                          goto free_data;
  2298                  }
  2299                  vec_index++;
  2300          }
  2301
  2302          ret = vec_index;
  2303
  2304  free_data:
  2305          if (IS_PM_SCAN_WP(p.flags))
  2306                  mmu_notifier_invalidate_range_end(&range);
  2307
  2308          kfree(p.vec_buf);
  2309          return ret;
  2310  }
  2311
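
One possible way to address the sparse complaint, sketched here as an untested
hunk (not necessarily how the author intends to resolve it), is to keep the
__user annotation on the cast so that the assignment matches the declared type
of 'vec':

--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2209 +2209 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long __arg)
-        vec = (struct page_region *)untagged_addr((unsigned long)arg.vec);
+        vec = (struct page_region __user *)untagged_addr((unsigned long)arg.vec);

Going through u64_to_user_ptr() instead of an open-coded cast would be another
option, assuming arg.vec is a __u64; that is left to the author's judgement.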

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki