small runqueue optimization, 2.1.30

Ingo Molnar (mingo@pc5829.hil.siemens.at)
Fri, 28 Mar 1997 16:03:05 +0100 (MET)


what it does:

this patch removes the nr_running++/nr_running-- bookkeeping from
add_to_runqueue() and remove_from_runqueue(). A new function, nr_running(),
is added, which walks the runqueue and counts the runnable tasks.

why:

nr_running is rarely used (currently only in /proc/loadavg), so it's not
worth maintaining the count on every runqueue insertion and removal.
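
for reference, nr_running shows up as the first half of the fourth field
of /proc/loadavg; example output with made-up values:

	0.06 0.02 0.00 2/28 301

(2 runnable tasks out of 28 total, last pid 301)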

the patch uses the new SMP-safe locking semantics to access the runqueue:
the walk is done under runqueue_lock with interrupts disabled, so it cannot
race with wakeups. as a side fix, sys_sched_yield() now takes scheduler_lock
around the runqueue manipulation instead of the reversed unlock/lock pair it
had. i'm running a (uniprocessor) kernel with this patch now.
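
to illustrate the data structure (not part of the patch): the runqueue is a
circular doubly-linked list anchored at init_task, so counting on demand is
just a walk until we get back to the anchor. a minimal user-space model,
with the struct and list handling modelled on kernel/sched.c and everything
else made up:

/* user-space sketch of the runqueue walk */
#include <stdio.h>

struct task {
	struct task *next_run, *prev_run;
};

static struct task init_task = { &init_task, &init_task };

/* tail insert, same pointer dance as add_to_runqueue() */
static void add_to_runqueue(struct task *p)
{
	(p->prev_run = init_task.prev_run)->next_run = p;
	p->next_run = &init_task;
	init_task.prev_run = p;
}

/* count by walking the ring, like the new nr_running()
 * (minus the spinlock, which user space doesn't have) */
static int nr_running(void)
{
	struct task *p;
	int nr = 0;

	for (p = init_task.next_run; p != &init_task; p = p->next_run)
		nr++;
	return nr;
}

int main(void)
{
	struct task a, b, c;

	add_to_runqueue(&a);
	add_to_runqueue(&b);
	add_to_runqueue(&c);
	printf("nr_running = %d\n", nr_running());	/* 3 */
	return 0;
}

the #if 0 sanity check in the real function below additionally verifies
that the prev_run/next_run links of each task are consistent.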

-- mingo

--- linux-2.1.30_vanilla/kernel/sched.c Fri Mar 28 12:32:36 1997
+++ linux/kernel/sched.c Fri Mar 28 16:41:58 1997
@@ -122,7 +122,6 @@
#endif
if (p->counter > current->counter + 3)
need_resched = 1;
- nr_running++;
(p->prev_run = init_task.prev_run)->next_run = p;
p->next_run = &init_task;
init_task.prev_run = p;
@@ -163,7 +162,6 @@
}
return;
}
- nr_running--;
next->prev_run = prev;
prev->next_run = next;
p->next_run = NULL;
@@ -199,6 +197,38 @@
static spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED;

/*
+ * Sometimes we want to know the number of runnable processes,
+ * but it's quite rare, thus we walk the runqueue instead of
+ * keeping a count in add_to_runqueue()/remove_from_runqueue().
+ */
+
+int nr_running (void)
+{
+ struct task_struct *p;
+ unsigned long flags;
+ int nr = 0;
+
+ spin_lock_irqsave(&runqueue_lock, flags);
+
+ for (p = init_task.next_run; p != &init_task; p = p->next_run) {
+#if 0
+ /*
+ * calling nr_running is a good way to sanity-check
+ * the runqueue:
+ */
+ if ( (p->prev_run->next_run != p) ||
+ (p->next_run->prev_run != p) )
+ panic("run-queue corrupted!");
+#endif
+ nr++;
+ }
+
+ spin_unlock_irqrestore(&runqueue_lock, flags);
+
+ return nr;
+}
+
+/*
* Wake up a process. Put it on the run-queue if it's not
* already there. The "current" process is always on the
* run-queue (except when the actual re-schedule is in
@@ -1553,11 +1583,11 @@

asmlinkage int sys_sched_yield(void)
{
- spin_unlock(&scheduler_lock);
+ spin_lock(&scheduler_lock);
spin_lock_irq(&runqueue_lock);
move_last_runqueue(current);
spin_unlock_irq(&runqueue_lock);
- spin_lock(&scheduler_lock);
+ spin_unlock(&scheduler_lock);
need_resched = 1;
return 0;
}
--- linux-2.1.30_vanilla/kernel/fork.c Sun Jan 26 12:40:46 1997
+++ linux/kernel/fork.c Fri Mar 28 16:27:41 1997
@@ -29,7 +29,6 @@
#include <asm/uaccess.h>

int nr_tasks=1;
-int nr_running=1;
unsigned long int total_forks=0; /* Handle normal Linux uptimes. */
int last_pid=0;

--- linux-2.1.30_vanilla/include/linux/sched.h Fri Mar 28 12:32:24 1997
+++ linux/include/linux/sched.h Fri Mar 28 16:39:10 1997
@@ -63,7 +63,8 @@
#define CT_TO_SECS(x) ((x) / HZ)
#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)

-extern int nr_running, nr_tasks;
+extern int nr_running (void);
+extern int nr_tasks;
extern int last_pid;

#define FIRST_TASK task[0]
--- linux-2.1.30_vanilla/fs/proc/array.c Fri Mar 28 12:31:55 1997
+++ linux/fs/proc/array.c Fri Mar 28 16:38:20 1997
@@ -188,7 +188,7 @@
LOAD_INT(a), LOAD_FRAC(a),
LOAD_INT(b), LOAD_FRAC(b),
LOAD_INT(c), LOAD_FRAC(c),
- nr_running, nr_tasks, last_pid);
+ nr_running(), nr_tasks, last_pid);
}

static int get_kstat(char * buffer)