[patch V3 25/28] x86/perf/intel/rapl: Utilize event->pmu_private

From: Thomas Gleixner
Date: Mon Feb 22 2016 - 17:21:52 EST


Store the pmu in event->pmu_private and use it instead of the per cpu
data. This is a preparatory step for getting rid of the per cpu allocations.
The usage sites are in the perf fast path, so we keep the cached pointer even
after the conversion to per package storage: a cpu to package lookup involves
3 loads versus 1 with the pmu_private pointer.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
arch/x86/kernel/cpu/perf_event_intel_rapl.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)

--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -122,6 +122,7 @@ static struct perf_pmu_events_attr event
struct rapl_pmu {
raw_spinlock_t lock;
int n_active;
+ int cpu;
struct list_head active_list;
struct pmu *pmu;
ktime_t timer_interval;
@@ -203,7 +204,7 @@ static void rapl_start_hrtimer(struct ra

static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
- struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+ struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
struct perf_event *event;
unsigned long flags;

@@ -249,7 +250,7 @@ static void __rapl_pmu_event_start(struc

static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+ struct rapl_pmu *pmu = event->pmu_private;
unsigned long flags;

raw_spin_lock_irqsave(&pmu->lock, flags);
@@ -259,7 +260,7 @@ static void rapl_pmu_event_start(struct

static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+ struct rapl_pmu *pmu = event->pmu_private;
struct hw_perf_event *hwc = &event->hw;
unsigned long flags;

@@ -293,7 +294,7 @@ static void rapl_pmu_event_stop(struct p

static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+ struct rapl_pmu *pmu = event->pmu_private;
struct hw_perf_event *hwc = &event->hw;
unsigned long flags;

@@ -316,6 +317,7 @@ static void rapl_pmu_event_del(struct pe

static int rapl_pmu_event_init(struct perf_event *event)
{
+ struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
u64 cfg = event->attr.config & RAPL_EVENT_MASK;
int bit, msr, ret = 0;

@@ -327,6 +329,9 @@ static int rapl_pmu_event_init(struct pe
if (event->attr.config & ~RAPL_EVENT_MASK)
return -EINVAL;

+ if (event->cpu < 0)
+ return -EINVAL;
+
/*
* check event is known (determines counter)
*/
@@ -365,6 +370,8 @@ static int rapl_pmu_event_init(struct pe
return -EINVAL;

/* must be done before validate_group */
+ event->cpu = pmu->cpu;
+ event->pmu_private = pmu;
event->hw.event_base = msr;
event->hw.config = cfg;
event->hw.idx = bit;
@@ -572,6 +579,7 @@ static int rapl_cpu_prepare(int cpu)
INIT_LIST_HEAD(&pmu->active_list);

pmu->pmu = &rapl_pmu_class;
+ pmu->cpu = cpu;

pmu->timer_interval = ms_to_ktime(rapl_timer_ms);