Re: [PATCH 2/7] perf/cgroup: order events in RB tree by cgroup id

From: Ian Rogers
Date: Wed Jul 24 2019 - 18:34:47 EST


On Mon, Jul 8, 2019 at 9:16 AM Peter Zijlstra <peterz@xxxxxxxxxxxxx> wrote:
>
> On Mon, Jul 01, 2019 at 11:59:50PM -0700, Ian Rogers wrote:
> > +static int visit_groups_merge(struct perf_event_context *ctx,
> > +			      struct perf_cpu_context *cpuctx,
> > +			      struct perf_event_groups *groups,
> > +			      int (*func)(struct perf_event_context *,
> > +					  struct perf_cpu_context *,
> > +					  struct perf_event *,
> > +					  int *),
> > +			      int *data)
>
> > -struct sched_in_data {
> > -	struct perf_event_context *ctx;
> > -	struct perf_cpu_context *cpuctx;
> > -	int can_add_hw;
> > -};
> > -
> > -static int pinned_sched_in(struct perf_event *event, void *data)
> > +static int pinned_sched_in(struct perf_event_context *ctx,
> > +			   struct perf_cpu_context *cpuctx,
> > +			   struct perf_event *event,
> > +			   int *unused)
> >  {
> > -	struct sched_in_data *sid = data;
> > -
> >  	if (event->state <= PERF_EVENT_STATE_OFF)
> >  		return 0;
> >
> >  	if (!event_filter_match(event))
> >  		return 0;
> >
> > -	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
> > -		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
> > -			list_add_tail(&event->active_list, &sid->ctx->pinned_active);
> > +	if (group_can_go_on(event, cpuctx, 1)) {
> > +		if (!group_sched_in(event, cpuctx, ctx))
> > +			list_add_tail(&event->active_list, &ctx->pinned_active);
> >  	}
> >
> >  	/*
> > @@ -3317,24 +3444,25 @@ static int pinned_sched_in(struct perf_event *event, void *data)
> >  	return 0;
> >  }
> >
> > -static int flexible_sched_in(struct perf_event *event, void *data)
> > +static int flexible_sched_in(struct perf_event_context *ctx,
> > +			     struct perf_cpu_context *cpuctx,
> > +			     struct perf_event *event,
> > +			     int *can_add_hw)
> >  {
> > -	struct sched_in_data *sid = data;
> > -
> >  	if (event->state <= PERF_EVENT_STATE_OFF)
> >  		return 0;
> >
> >  	if (!event_filter_match(event))
> >  		return 0;
> >
> > -	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
> > -		int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
> > +	if (group_can_go_on(event, cpuctx, *can_add_hw)) {
> > +		int ret = group_sched_in(event, cpuctx, ctx);
> >  		if (ret) {
> > -			sid->can_add_hw = 0;
> > -			sid->ctx->rotate_necessary = 1;
> > +			*can_add_hw = 0;
> > +			ctx->rotate_necessary = 1;
> >  			return 0;
> >  		}
> > -		list_add_tail(&event->active_list, &sid->ctx->flexible_active);
> > +		list_add_tail(&event->active_list, &ctx->flexible_active);
> >  	}
> >
> >  	return 0;
> > @@ -3344,30 +3472,24 @@ static void
> >  ctx_pinned_sched_in(struct perf_event_context *ctx,
> >  		    struct perf_cpu_context *cpuctx)
> >  {
> > -	struct sched_in_data sid = {
> > -		.ctx = ctx,
> > -		.cpuctx = cpuctx,
> > -		.can_add_hw = 1,
> > -	};
> > -
> > -	visit_groups_merge(&ctx->pinned_groups,
> > -			   smp_processor_id(),
> > -			   pinned_sched_in, &sid);
> > +	visit_groups_merge(ctx,
> > +			   cpuctx,
> > +			   &ctx->pinned_groups,
> > +			   pinned_sched_in,
> > +			   NULL);
> >  }
> >
> >  static void
> >  ctx_flexible_sched_in(struct perf_event_context *ctx,
> >  		      struct perf_cpu_context *cpuctx)
> >  {
> > -	struct sched_in_data sid = {
> > -		.ctx = ctx,
> > -		.cpuctx = cpuctx,
> > -		.can_add_hw = 1,
> > -	};
> > +	int can_add_hw = 1;
> >
> > -	visit_groups_merge(&ctx->flexible_groups,
> > -			   smp_processor_id(),
> > -			   flexible_sched_in, &sid);
> > +	visit_groups_merge(ctx,
> > +			   cpuctx,
> > +			   &ctx->flexible_groups,
> > +			   flexible_sched_in,
> > +			   &can_add_hw);
> >  }
>
> It is not clear to me why you're doing away with that sched_in_data
> abstraction. AFAICT that has no material impact on this patch.

The change alters the visit_groups_merge() parameters to take ctx and
cpuctx, which it now needs in order to compute the number of iterators.
Since visit_groups_merge() already receives ctx and cpuctx, they were
taken out of sched_in_data to avoid passing them twice. can_add_hw
remains, but it is passed as a bare int pointer rather than wrapped in
sched_in_data, as wrapping a single boolean seemed unnecessary.
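
Roughly, the merge is a k-way walk over sorted per-tree iterators,
always visiting the event with the smallest group_index next, and
ctx/cpuctx are what determine how many iterators there are. A toy,
self-contained user-space sketch of that technique (my own simplified
types and names here, e.g. struct ev and visit_merge, not the kernel
code):

#include <stdio.h>

/* Toy stand-in for perf_event; the kernel keeps events in RB trees
 * ordered by group_index, a sorted list is enough for the sketch. */
struct ev {
	unsigned long group_index;
	struct ev *next;
};

/* Visit events from nr sorted streams in global group_index order,
 * stopping early if the callback returns non-zero. */
static int visit_merge(struct ev **iters, int nr,
		       int (*func)(struct ev *, void *), void *data)
{
	for (;;) {
		struct ev **min = NULL;
		int i, ret;

		for (i = 0; i < nr; i++) {
			if (iters[i] &&
			    (!min || iters[i]->group_index < (*min)->group_index))
				min = &iters[i];
		}
		if (!min)
			return 0;	/* all iterators drained */

		ret = func(*min, data);
		if (ret)
			return ret;
		*min = (*min)->next;	/* advance only the visited stream */
	}
}

static int print_ev(struct ev *e, void *data)
{
	(void)data;
	printf("%lu\n", e->group_index);
	return 0;
}

int main(void)
{
	/* Two streams, standing in for CPU-wide events and one
	 * cgroup's events. */
	struct ev cpu1 = { 7, NULL }, cpu0 = { 3, &cpu1 };
	struct ev cg1 = { 5, NULL }, cg0 = { 1, &cg1 };
	struct ev *iters[] = { &cpu0, &cg0 };

	return visit_merge(iters, 2, print_ev, NULL);	/* prints 1 3 5 7 */
}

In the sketch the iterator count is fixed at two; the kernel side has
to size that set at runtime, which is the part that needs ctx/cpuctx.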

Thanks,
Ian