Re: [PATCH 2/7] perf: Generalize task_function_call()ers

From: Peter Zijlstra
Date: Thu Dec 17 2015 - 08:40:43 EST


On Tue, Dec 08, 2015 at 05:57:00PM +0100, Peter Zijlstra wrote:
> On Tue, Dec 08, 2015 at 06:42:01PM +0200, Alexander Shishkin wrote:
> > > +void ___perf_event_disable(void *info)
> >
> > Only maybe change these to __perf_event_disable_locked() or something
> > visually distinctive from the 'active' callback?
>
> Yeah, I ran out of naming-foo and punted. I'll give it another go
> tomorrow.

How about something like so?

It's a bit 'weird' but they're already long function names and adding
things like _locked to them makes them really rather unwieldy.

---
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1660,7 +1660,7 @@ struct remove_event {
bool detach_group;
};

-static void ___perf_remove_from_context(void *info)
+static void __I_perf_remove_from_context(void *info)
{
struct remove_event *re = info;
struct perf_event *event = re->event;
@@ -1677,7 +1677,7 @@ static void ___perf_remove_from_context(
* We disable the event on the hardware level first. After that we
* remove it from the context list.
*/
-static int __perf_remove_from_context(void *info)
+static int __A_perf_remove_from_context(void *info)
{
struct remove_event *re = info;
struct perf_event *event = re->event;
@@ -1721,14 +1721,14 @@ static void perf_remove_from_context(str

lockdep_assert_held(&ctx->mutex);

- event_function_call(event, __perf_remove_from_context,
- ___perf_remove_from_context, &re);
+ event_function_call(event, __A_perf_remove_from_context,
+ __I_perf_remove_from_context, &re);
}

/*
* Cross CPU call to disable a performance event
*/
-int __perf_event_disable(void *info)
+int __A_perf_event_disable(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
@@ -1766,7 +1766,7 @@ int __perf_event_disable(void *info)
return 0;
}

-void ___perf_event_disable(void *info)
+void __I_perf_event_disable(void *info)
{
struct perf_event *event = info;

@@ -1804,8 +1804,8 @@ static void _perf_event_disable(struct p
}
raw_spin_unlock_irq(&ctx->lock);

- event_function_call(event, __perf_event_disable,
- ___perf_event_disable, event);
+ event_function_call(event, __A_perf_event_disable,
+ __I_perf_event_disable, event);
}

/*
@@ -2058,7 +2058,7 @@ static void perf_event_sched_in(struct p
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}

-static void ___perf_install_in_context(void *info)
+static void __I_perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
@@ -2075,7 +2075,7 @@ static void ___perf_install_in_context(v
*
* Must be called with ctx->mutex held
*/
-static int __perf_install_in_context(void *info)
+static int __A_perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
@@ -2152,8 +2152,8 @@ perf_install_in_context(struct perf_even
if (event->cpu != -1)
event->cpu = cpu;

- event_function_call(event, __perf_install_in_context,
- ___perf_install_in_context, event);
+ event_function_call(event, __A_perf_install_in_context,
+ __I_perf_install_in_context, event);
}

/*
@@ -2180,7 +2180,7 @@ static void __perf_event_mark_enabled(st
/*
* Cross CPU call to enable a performance event
*/
-static int __perf_event_enable(void *info)
+static int __A_perf_event_enable(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
@@ -2256,7 +2256,7 @@ static int __perf_event_enable(void *inf
return 0;
}

-void ___perf_event_enable(void *info)
+void __I_perf_event_enable(void *info)
{
__perf_event_mark_enabled((struct perf_event *)info);
}
@@ -2291,8 +2291,8 @@ static void _perf_event_enable(struct pe
event->state = PERF_EVENT_STATE_OFF;
raw_spin_unlock_irq(&ctx->lock);

- event_function_call(event, __perf_event_enable,
- ___perf_event_enable, event);
+ event_function_call(event, __A_perf_event_enable,
+ __I_perf_event_enable, event);
}

/*
@@ -4091,7 +4091,7 @@ struct period_event {
u64 value;
};

-static void ___perf_event_period(void *info)
+static void __I_perf_event_period(void *info)
{
struct period_event *pe = info;
struct perf_event *event = pe->event;
@@ -4107,7 +4107,7 @@ static void ___perf_event_period(void *i
local64_set(&event->hw.period_left, 0);
}

-static int __perf_event_period(void *info)
+static int __A_perf_event_period(void *info)
{
struct period_event *pe = info;
struct perf_event *event = pe->event;
@@ -4159,8 +4159,8 @@ static int perf_event_period(struct perf

pe.value = value;

- event_function_call(event, __perf_event_period,
- ___perf_event_period, &pe);
+ event_function_call(event, __A_perf_event_period,
+ __I_perf_event_period, &pe);

return 0;
}
@@ -9226,7 +9226,7 @@ static void __perf_event_exit_context(vo

rcu_read_lock();
list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
- __perf_remove_from_context(&re);
+ __A_perf_remove_from_context(&re);
rcu_read_unlock();
}

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/