[PATCH] Fix BUG messages

From: Chirag Jog
Date: Wed Jul 30 2008 - 13:19:00 EST


* Jürgen Mell <j.mell@xxxxxxxxxxx> [2008-07-30 11:01:32]:

> Hello Thomas,
>
> On Wednesday, 30. July 2008, Thomas Gleixner wrote:
> > We are pleased to announce the 2.6.26-rt1 tree, which can be
> > downloaded from the location:
>
> I have tried the new kernel and have some good news and some bad news:
>
> The good news: The machine boots and seems to run without major problems.
>
> The bad news: It continuously produces lots of BUG messages in the error
> logs (cf. attached dmesg.tgz). The error at rtmutex.c:743 was already
> present in 2.6.25-rt* when ACPI was enabled. The 'using smp_processor_id()
> in preemptible code' warning is new here with 2.6.26.
>
> Machine is an old Athlon XP (single core) on an EPOX mainboard with VIA
> chipset.
>
> If I can help with testing, please let me know.
>
> Bye,
> Jürgen
>
>
This patch should fix some of the BUG messages.
It does two things:
1. Change rt_runtime_lock to a raw spinlock. As the comment above it
says, it nests inside the rq lock, so it must stay a real (non-sleeping)
spinlock under -rt.

2. Change mnt_writers to a per-CPU locked variable.
This eliminates the need for that code path to disable preemption (via
get_cpu_var()) and then take a lock that can sleep under -rt, which is
what produces the BUG messages. A rough sketch of the per-CPU locked
idea follows below.
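
For reference, here is a minimal userspace sketch of the "per-CPU locked"
pattern the patch switches to. This is an illustration only, not the kernel
API: the names (struct pcpu_writer, get_writer_locked(), put_writer_locked(),
NR_SLOTS) are made up for the example, while the real helpers are
DEFINE_PER_CPU_LOCKED()/get_cpu_var_locked()/put_cpu_var_locked() from the
-rt tree. The point is that each per-CPU slot carries its own lock, so the
code serializes on that lock instead of disabling preemption around the
access.

/* Userspace sketch only -- a pthread mutex stands in for the per-slot lock. */
#include <pthread.h>
#include <stdio.h>

#define NR_SLOTS 4			/* hypothetical CPU count for the demo */

struct pcpu_writer {
	pthread_mutex_t lock;		/* per-slot lock, like the -rt per-CPU lock */
	unsigned long count;		/* plays the role of the mnt_writers count */
};

static struct pcpu_writer writers[NR_SLOTS];

static void init_writers(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_SLOTS; cpu++) {
		pthread_mutex_init(&writers[cpu].lock, NULL);
		writers[cpu].count = 0;
	}
}

/* Pick a slot, take its lock, and remember which slot in *cpu. */
static struct pcpu_writer *get_writer_locked(int *cpu)
{
	*cpu = 0;	/* the kernel would use the current CPU here */
	pthread_mutex_lock(&writers[*cpu].lock);
	return &writers[*cpu];
}

static void put_writer_locked(int cpu)
{
	pthread_mutex_unlock(&writers[cpu].lock);
}

int main(void)
{
	int cpu;
	struct pcpu_writer *w;

	init_writers();

	w = get_writer_locked(&cpu);	/* analogue of mnt_want_write() */
	w->count++;
	put_writer_locked(cpu);

	printf("slot %d count %lu\n", cpu, writers[cpu].count);
	return 0;
}

Because the serialization comes from the per-slot lock rather than from
preempt_disable(), that lock can be a sleeping lock under -rt without
triggering the might-sleep / smp_processor_id() warnings.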

Signed-off-by: Chirag <chirag@xxxxxxxxxxxxxxxxxx>



Index: linux-2.6.26-rt1/kernel/sched.c
===================================================================
--- linux-2.6.26-rt1.orig/kernel/sched.c 2008-07-30 22:37:19.000000000 +0530
+++ linux-2.6.26-rt1/kernel/sched.c 2008-07-30 22:37:24.000000000 +0530
@@ -208,7 +208,7 @@

struct rt_bandwidth {
/* nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ raw_spinlock_t rt_runtime_lock;
ktime_t rt_period;
u64 rt_runtime;
struct hrtimer rt_period_timer;
@@ -472,7 +472,7 @@
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;
Index: linux-2.6.26-rt1/fs/namespace.c
===================================================================
--- linux-2.6.26-rt1.orig/fs/namespace.c 2008-07-30 22:39:30.000000000 +0530
+++ linux-2.6.26-rt1/fs/namespace.c 2008-07-30 22:39:36.000000000 +0530
@@ -178,13 +178,13 @@
unsigned long count;
struct vfsmount *mnt;
} ____cacheline_aligned_in_smp;
-static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
+static DEFINE_PER_CPU_LOCKED(struct mnt_writer, mnt_writers);

static int __init init_mnt_writers(void)
{
int cpu;
for_each_possible_cpu(cpu) {
- struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
+ struct mnt_writer *writer = &per_cpu_var_locked(mnt_writers, cpu);
spin_lock_init(&writer->lock);
lockdep_set_class(&writer->lock, &writer->lock_class);
writer->count = 0;
@@ -199,7 +199,7 @@
struct mnt_writer *cpu_writer;

for_each_possible_cpu(cpu) {
- cpu_writer = &per_cpu(mnt_writers, cpu);
+ cpu_writer = &per_cpu_var_locked(mnt_writers, cpu);
spin_unlock(&cpu_writer->lock);
}
}
@@ -251,8 +251,8 @@
{
int ret = 0;
struct mnt_writer *cpu_writer;
-
- cpu_writer = &get_cpu_var(mnt_writers);
+ int cpu = 0;
+ cpu_writer = &get_cpu_var_locked(mnt_writers, &cpu);
spin_lock(&cpu_writer->lock);
if (__mnt_is_readonly(mnt)) {
ret = -EROFS;
@@ -262,7 +262,7 @@
cpu_writer->count++;
out:
spin_unlock(&cpu_writer->lock);
- put_cpu_var(mnt_writers);
+ put_cpu_var_locked(mnt_writers, cpu);
return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
@@ -273,7 +273,7 @@
struct mnt_writer *cpu_writer;

for_each_possible_cpu(cpu) {
- cpu_writer = &per_cpu(mnt_writers, cpu);
+ cpu_writer = &per_cpu_var_locked(mnt_writers, cpu);
spin_lock(&cpu_writer->lock);
__clear_mnt_count(cpu_writer);
cpu_writer->mnt = NULL;
@@ -332,8 +332,8 @@
{
int must_check_underflow = 0;
struct mnt_writer *cpu_writer;
-
- cpu_writer = &get_cpu_var(mnt_writers);
+ int cpu = 0;
+ cpu_writer = &get_cpu_var_locked(mnt_writers, &cpu);
spin_lock(&cpu_writer->lock);

use_cpu_writer_for_mount(cpu_writer, mnt);
@@ -360,7 +360,7 @@
* __mnt_writers can underflow. Without it,
* we could theoretically wrap __mnt_writers.
*/
- put_cpu_var(mnt_writers);
+ put_cpu_var_locked(mnt_writers, cpu);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

@@ -612,7 +612,7 @@
* can come in.
*/
for_each_possible_cpu(cpu) {
- struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
+ struct mnt_writer *cpu_writer = &per_cpu_var_locked(mnt_writers, cpu);
if (cpu_writer->mnt != mnt)
continue;
spin_lock(&cpu_writer->lock);

