Re: [PATCH v2 02/11] mm/mempolicy: introduce MPOL_WEIGHTED_INTERLEAVE for weighted interleaving

From: kernel test robot
Date: Sat Dec 09 2023 - 16:24:31 EST


Hi Gregory,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on deller-parisc/for-next powerpc/next powerpc/fixes s390/features jcmvbkbc-xtensa/xtensa-for-next arnd-asm-generic/master linus/master v6.7-rc4 next-20231208]
[cannot apply to tip/x86/asm geert-m68k/for-next geert-m68k/for-linus]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting the patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url: https://github.com/intel-lab-lkp/linux/commits/Gregory-Price/mm-mempolicy-implement-the-sysfs-based-weighted_interleave-interface/20231209-150314
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20231209065931.3458-3-gregory.price%40memverge.com
patch subject: [PATCH v2 02/11] mm/mempolicy: introduce MPOL_WEIGHTED_INTERLEAVE for weighted interleaving
config: x86_64-rhel-8.3-rust (https://download.01.org/0day-ci/archive/20231210/202312100543.ix4jxw81-lkp@xxxxxxxxx/config)
compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git ae42196bc493ffe877a7e3dff8be32035dea4d07)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231210/202312100543.ix4jxw81-lkp@xxxxxxxxx/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@xxxxxxxxx>
| Closes: https://lore.kernel.org/oe-kbuild-all/202312100543.ix4jxw81-lkp@xxxxxxxxx/

All warnings (new ones prefixed by >>):

>> mm/mempolicy.c:2355:3: warning: variable 'weight_total' is uninitialized when used here [-Wuninitialized]
                   weight_total += weight;
                   ^~~~~~~~~~~~
   mm/mempolicy.c:2341:27: note: initialize the variable 'weight_total' to silence this warning
           unsigned int weight_total;
                                    ^
                                     = 0
   1 warning generated.
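
A minimal fix along the lines of the compiler's fix-it note (a sketch only,
not a tested patch) is to zero the accumulator at its declaration, so the
summation loop in the excerpt below starts from a defined value:

	-	unsigned int weight_total;
	+	unsigned int weight_total = 0;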


vim +/weight_total +2355 mm/mempolicy.c

  2329
  2330  static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
  2331                  struct mempolicy *pol, unsigned long nr_pages,
  2332                  struct page **page_array)
  2333  {
  2334          struct task_struct *me = current;
  2335          unsigned long total_allocated = 0;
  2336          unsigned long nr_allocated;
  2337          unsigned long rounds;
  2338          unsigned long node_pages, delta;
  2339          unsigned char weight;
  2340          unsigned char weights[MAX_NUMNODES];
  2341          unsigned int weight_total;
  2342          unsigned long rem_pages = nr_pages;
  2343          nodemask_t nodes = pol->nodes;
  2344          int nnodes, node, prev_node;
  2345          int i;
  2346
  2347          /* Stabilize the nodemask on the stack */
  2348          barrier();
  2349
  2350          nnodes = nodes_weight(nodes);
  2351
  2352          /* Collect weights and save them on stack so they don't change */
  2353          for_each_node_mask(node, nodes) {
  2354                  weight = iw_table[node];
> 2355                  weight_total += weight;
  2356                  weights[node] = weight;
  2357          }
  2358
  2359          /* Continue allocating from most recent node and adjust the nr_pages */
  2360          if (pol->wil.cur_weight) {
  2361                  node = next_node_in(me->il_prev, nodes);
  2362                  node_pages = pol->wil.cur_weight;
  2363                  if (node_pages > rem_pages)
  2364                          node_pages = rem_pages;
  2365                  nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
  2366                                                    NULL, page_array);
  2367                  page_array += nr_allocated;
  2368                  total_allocated += nr_allocated;
  2369                  /* if that's all the pages, no need to interleave */
  2370                  if (rem_pages <= pol->wil.cur_weight) {
  2371                          pol->wil.cur_weight -= rem_pages;
  2372                          return total_allocated;
  2373                  }
  2374                  /* Otherwise we adjust nr_pages down, and continue from there */
  2375                  rem_pages -= pol->wil.cur_weight;
  2376                  pol->wil.cur_weight = 0;
  2377                  prev_node = node;
  2378          }
  2379
  2380          /* Now we can continue allocating as if from 0 instead of an offset */
  2381          rounds = rem_pages / weight_total;
  2382          delta = rem_pages % weight_total;
  2383          for (i = 0; i < nnodes; i++) {
  2384                  node = next_node_in(prev_node, nodes);
  2385                  weight = weights[node];
  2386                  node_pages = weight * rounds;
  2387                  if (delta) {
  2388                          if (delta > weight) {
  2389                                  node_pages += weight;
  2390                                  delta -= weight;
  2391                          } else {
  2392                                  node_pages += delta;
  2393                                  delta = 0;
  2394                          }
  2395                  }
  2396                  /* We may not make it all the way around */
  2397                  if (!node_pages)
  2398                          break;
  2399                  /* If an over-allocation would occur, floor it */
  2400                  if (node_pages + total_allocated > nr_pages) {
  2401                          node_pages = nr_pages - total_allocated;
  2402                          delta = 0;
  2403                  }
  2404                  nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
  2405                                                    NULL, page_array);
  2406                  page_array += nr_allocated;
  2407                  total_allocated += nr_allocated;
  2408                  prev_node = node;
  2409          }
  2410
  2411          /*
  2412           * Finally, update me->il_prev and pol->wil.cur_weight.  If the last
  2413           * node received overflow pages that did not add up to its full weight,
  2414           * set cur_weight to the unused portion of that weight and me->il_prev
  2415           * to the previous node.  Otherwise the allocation ended exactly on a
  2416           * node boundary, so simply set il_prev to node and cur_weight to 0.
  2417           */
  2418          if (node_pages) {
  2419                  me->il_prev = prev_node;
  2420                  node_pages %= weight;
  2421                  pol->wil.cur_weight = weight - node_pages;
  2422          } else {
  2423                  me->il_prev = node;
  2424                  pol->wil.cur_weight = 0;
  2425          }
  2426
  2427          return total_allocated;
  2428  }
  2429
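
To make the distribution arithmetic in the loop above concrete: rem_pages is
split into whole rounds of weight_total plus a remainder delta; each node gets
weight * rounds pages, and the earliest nodes in the walk absorb the remainder.
A small userspace sketch (hypothetical weights, not part of the patch)
reproduces that split:

  #include <stdio.h>

  /*
   * Hypothetical example: two nodes with interleave weights 3 and 1
   * (assumed values, standing in for iw_table), distributing 10 pages
   * the same way the loop in the excerpt above does.
   */
  int main(void)
  {
          unsigned char weights[] = { 3, 1 };
          unsigned long rem_pages = 10;
          unsigned int weight_total = 3 + 1;
          unsigned long rounds = rem_pages / weight_total;        /* 2 */
          unsigned long delta = rem_pages % weight_total;         /* 2 */

          for (int i = 0; i < 2; i++) {
                  unsigned long node_pages = weights[i] * rounds;

                  if (delta) {
                          if (delta > weights[i]) {
                                  node_pages += weights[i];
                                  delta -= weights[i];
                          } else {
                                  node_pages += delta;
                                  delta = 0;
                          }
                  }
                  printf("node %d gets %lu pages\n", i, node_pages);
          }
          return 0;       /* prints: node 0 gets 8 pages, node 1 gets 2 */
  }

With these assumed weights, node 0 receives 3*2 rounds plus both remainder
pages (8 total) and node 1 receives its 2 round pages, accounting for all 10.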

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki