Re: [PATCH 3/4] sched: add sched_numa_find_nth_cpu()

From: Yury Norov
Date: Thu Nov 10 2022 - 23:12:41 EST


On Thu, Nov 10, 2022 at 08:00:26PM -0800, Yury Norov wrote:
> The function finds Nth set CPU in a given cpumask starting from a given
> node.
>
> Leveraging the fact that each hop in sched_domains_numa_masks includes the
> same or greater number of CPUs than the previous one, we can use binary
> search on hops instead of a linear walk, which makes the overall complexity
> O(log n) in terms of the number of cpumask_weight() calls.
>
> Signed-off-by: Yury Norov <yury.norov@xxxxxxxxx>
> ---
> include/linux/topology.h | 8 ++++++++
> kernel/sched/topology.c | 42 ++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 50 insertions(+)
>
> diff --git a/include/linux/topology.h b/include/linux/topology.h
> index 4564faafd0e1..63048ac3207c 100644
> --- a/include/linux/topology.h
> +++ b/include/linux/topology.h
> @@ -245,5 +245,13 @@ static inline const struct cpumask *cpu_cpu_mask(int cpu)
> return cpumask_of_node(cpu_to_node(cpu));
> }
>
> +#ifdef CONFIG_NUMA
> +int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
> +#else
> +int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)

Ah, this should be static (inline, really, since it lives in a header), of course.

> +{
> + return cpumask_nth(cpu, cpus);
> +}
> +#endif /* CONFIG_NUMA */
>
> #endif /* _LINUX_TOPOLOGY_H */
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index 8739c2a5a54e..c8f56287de46 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -2067,6 +2067,48 @@ int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
> return found;
> }
>
> +/*
> + * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth set
> + * CPU in @cpus, searching in hop order starting from @node.
> + * @cpus: cpumask to find a cpu from
> + * @cpu: Nth cpu to find
> + * @node: NUMA node to start the search from
> + *
> + * Return: cpu, or >= nr_cpu_ids when nothing found.
> + */
> +int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
> +{
> + unsigned int first = 0, mid, last = sched_domains_numa_levels;
> + struct cpumask ***masks;
> + int w, ret = nr_cpu_ids;
> +
> + rcu_read_lock();
> + masks = rcu_dereference(sched_domains_numa_masks);
> + if (!masks)
> + goto out;
> +
> + while (last >= first) {
> + mid = (last + first) / 2;
> +
> + if (cpumask_weight_and(cpus, masks[mid][node]) <= cpu) {
> + first = mid + 1;
> + continue;
> + }
> +
> + w = (mid == 0) ? 0 : cpumask_weight_and(cpus, masks[mid - 1][node]);
> + if (w <= cpu)
> + break;
> +
> + last = mid - 1;
> + }
> +
> + ret = (mid == 0) ?
> + cpumask_nth_and(cpu - w, cpus, masks[mid][node]) :
> + cpumask_nth_and_andnot(cpu - w, cpus, masks[mid][node], masks[mid - 1][node]);
> +out:
> + rcu_read_unlock();
> + return ret;
> +}
> +EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
> #endif /* CONFIG_NUMA */
>
> static int __sdt_alloc(const struct cpumask *cpu_map)
> --
> 2.34.1