[PATCH] bcache: improve read-side performance of closure debug with RCU

From: Dawei Li
Date: Mon Sep 26 2022 - 12:14:09 EST


The closure debug system maintains a closure list that is shared between
readers and writers, with a spinlock protecting concurrent access to it.
This makes all readers and writers mutually exclusive, which adds
overhead and hurts performance.

RCU-based protection is introduced to solve the problem, allowing
readers to traverse the list without taking the spinlock.

Signed-off-by: Dawei Li <set_pte_at@xxxxxxxxxxx>
---
drivers/md/bcache/closure.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index d8d9394a6beb..b019d6338589 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
+#include <linux/rculist.h>

#include "closure.h"

@@ -141,7 +142,7 @@ void closure_debug_create(struct closure *cl)
cl->magic = CLOSURE_MAGIC_ALIVE;

spin_lock_irqsave(&closure_list_lock, flags);
- list_add(&cl->all, &closure_list);
+ list_add_rcu(&cl->all, &closure_list);
spin_unlock_irqrestore(&closure_list_lock, flags);
}

@@ -153,7 +154,7 @@ void closure_debug_destroy(struct closure *cl)
cl->magic = CLOSURE_MAGIC_DEAD;

spin_lock_irqsave(&closure_list_lock, flags);
- list_del(&cl->all);
+ list_del_rcu(&cl->all);
spin_unlock_irqrestore(&closure_list_lock, flags);
}

@@ -163,9 +164,9 @@ static int debug_show(struct seq_file *f, void *data)
{
struct closure *cl;

- spin_lock_irq(&closure_list_lock);
+ rcu_read_lock();

- list_for_each_entry(cl, &closure_list, all) {
+ list_for_each_entry_rcu(cl, &closure_list, all) {
int r = atomic_read(&cl->remaining);

seq_printf(f, "%p: %pS -> %pS p %p r %i ",
@@ -184,7 +185,8 @@ static int debug_show(struct seq_file *f, void *data)
seq_printf(f, "\n");
}

- spin_unlock_irq(&closure_list_lock);
+ rcu_read_unlock();
+
return 0;
}

--
2.25.1