[patch V2 24/28] x86/perf/intel/rapl: Make pmu lock raw

From: Thomas Gleixner
Date: Mon Feb 22 2016 - 06:10:53 EST


This lock is taken in hard interrupt context even on PREEMPT_RT, where a regular
spinlock_t becomes a sleeping lock and therefore cannot be taken there. Make it a
raw spinlock so RT does not have to patch it.
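
A minimal sketch of the locking pattern the driver ends up with; the names
example_lock and example_hrtimer_handle below are illustrative only and are
not the driver's own:

#include <linux/hrtimer.h>
#include <linux/spinlock.h>

/* Illustrative lock, not part of the RAPL driver. */
static DEFINE_RAW_SPINLOCK(example_lock);

static enum hrtimer_restart example_hrtimer_handle(struct hrtimer *hrtimer)
{
	unsigned long flags;

	/*
	 * This callback runs in hard interrupt context. On PREEMPT_RT a
	 * spinlock_t is a sleeping lock and may not be acquired here, so
	 * the protected section has to use a raw_spinlock_t, which stays
	 * a spinning lock on all configurations.
	 */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... update the active events ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);

	return HRTIMER_RESTART;
}

The critical sections involved are short and bounded, which is what makes
keeping them under a raw spinlock acceptable on RT.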

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
arch/x86/kernel/cpu/perf_event_intel_rapl.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

Index: b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
===================================================================
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -120,7 +120,7 @@ static struct perf_pmu_events_attr event
};

struct rapl_pmu {
- spinlock_t lock;
+ raw_spinlock_t lock;
int n_active;
struct list_head active_list;
struct pmu *pmu;
@@ -210,12 +210,12 @@ static enum hrtimer_restart rapl_hrtimer
if (!pmu->n_active)
return HRTIMER_NORESTART;

- spin_lock_irqsave(&pmu->lock, flags);
+ raw_spin_lock_irqsave(&pmu->lock, flags);

list_for_each_entry(event, &pmu->active_list, active_entry)
rapl_event_update(event);

- spin_unlock_irqrestore(&pmu->lock, flags);
+ raw_spin_unlock_irqrestore(&pmu->lock, flags);

hrtimer_forward_now(hrtimer, pmu->timer_interval);

@@ -252,9 +252,9 @@ static void rapl_pmu_event_start(struct
struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
unsigned long flags;

- spin_lock_irqsave(&pmu->lock, flags);
+ raw_spin_lock_irqsave(&pmu->lock, flags);
__rapl_pmu_event_start(pmu, event);
- spin_unlock_irqrestore(&pmu->lock, flags);
+ raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static void rapl_pmu_event_stop(struct perf_event *event, int mode)
@@ -263,7 +263,7 @@ static void rapl_pmu_event_stop(struct p
struct hw_perf_event *hwc = &event->hw;
unsigned long flags;

- spin_lock_irqsave(&pmu->lock, flags);
+ raw_spin_lock_irqsave(&pmu->lock, flags);

/* mark event as deactivated and stopped */
if (!(hwc->state & PERF_HES_STOPPED)) {
@@ -288,7 +288,7 @@ static void rapl_pmu_event_stop(struct p
hwc->state |= PERF_HES_UPTODATE;
}

- spin_unlock_irqrestore(&pmu->lock, flags);
+ raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static int rapl_pmu_event_add(struct perf_event *event, int mode)
@@ -297,14 +297,14 @@ static int rapl_pmu_event_add(struct per
struct hw_perf_event *hwc = &event->hw;
unsigned long flags;

- spin_lock_irqsave(&pmu->lock, flags);
+ raw_spin_lock_irqsave(&pmu->lock, flags);

hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

if (mode & PERF_EF_START)
__rapl_pmu_event_start(pmu, event);

- spin_unlock_irqrestore(&pmu->lock, flags);
+ raw_spin_unlock_irqrestore(&pmu->lock, flags);

return 0;
}
@@ -567,7 +567,7 @@ static int rapl_cpu_prepare(int cpu)
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
if (!pmu)
return -1;
- spin_lock_init(&pmu->lock);
+ raw_spin_lock_init(&pmu->lock);

INIT_LIST_HEAD(&pmu->active_list);