[PATCH 10/11] md: add atomic mode switching on I/O completion

From: tada keisuke
Date: Tue Mar 26 2024 - 06:32:40 EST


This patch depends on patch 09.

If nr_pending is still in percpu mode when rdev_dec_pending() runs for a faulty rdev, the switch to atomic mode cannot be done there, because the I/O completion path runs in softirq context. Defer it instead: set MD_RECOVERY_PERCPU and wake the md thread, so that md_check_recovery() performs the switch in process context, where it is safe to wait for the RCU grace period that percpu_ref_switch_to_atomic_sync() requires.
This patch completes the addition of atomic mode switching.
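
To illustrate the resulting flow, here is a simplified sketch (not part
of the patch; the helper names rdev_request_atomic_switch() and
mddev_switch_faulty_to_atomic() are made up for illustration, while
nr_pending_is_percpu_mode() comes from the earlier patches in this
series):

	/* I/O completion (softirq) path: must not sleep, only record and defer */
	static inline void rdev_request_atomic_switch(struct md_rdev *rdev,
						      struct mddev *mddev)
	{
		set_bit(MD_RECOVERY_PERCPU, &mddev->recovery);
		md_wakeup_thread(mddev->thread);	/* ends up in md_check_recovery() */
	}

	/* md thread (process context): may sleep, so the switch is safe here */
	static void mddev_switch_faulty_to_atomic(struct mddev *mddev)
	{
		struct md_rdev *rdev;

		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk >= 0 &&
			    test_bit(Faulty, &rdev->flags) &&
			    nr_pending_is_percpu_mode(rdev))
				percpu_ref_switch_to_atomic_sync(&rdev->nr_pending);
		clear_bit(MD_RECOVERY_PERCPU, &mddev->recovery);
	}

percpu_ref_switch_to_atomic_sync() blocks until the mode switch has
completed (it waits for an RCU grace period), which is why it must run
in process context rather than in the softirq completion path.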

Signed-off-by: Keisuke TADA <keisuke1.tada@xxxxxxxxxx>
Signed-off-by: Toshifumi OHTAKE <toshifumi.ootake@xxxxxxxxxx>
---
 drivers/md/md.c | 16 ++++++++++++++++
 drivers/md/md.h |  6 ++++++
 2 files changed, 22 insertions(+)

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8c0c48a3a585..1c7b774dbb48 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9566,6 +9566,7 @@ void md_check_recovery(struct mddev *mddev)
 		return;
 	if ( ! (
 		(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
+		test_bit(MD_RECOVERY_PERCPU, &mddev->recovery) ||
 		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
 		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
 		(mddev->external == 0 && mddev->safemode == 1) ||
@@ -9576,6 +9577,21 @@ void md_check_recovery(struct mddev *mddev)
 
 	if (mddev_trylock(mddev)) {
 		bool try_set_sync = mddev->safemode != 0;
+		struct md_rdev *fault_rdev;
+		bool faulty_some = false;
+
+		rdev_for_each(fault_rdev, mddev) {
+			if (fault_rdev->raid_disk >= 0 &&
+			    test_bit(Faulty, &fault_rdev->flags) &&
+			    nr_pending_is_percpu_mode(fault_rdev)) {
+				percpu_ref_switch_to_atomic_sync(&fault_rdev->nr_pending);
+				faulty_some = true;
+			}
+		}
+		if (faulty_some) {
+			clear_bit(MD_RECOVERY_PERCPU, &mddev->recovery);
+			goto unlock;
+		}
 
 		if (!mddev->external && mddev->safemode == 1)
 			mddev->safemode = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index ee84c4b5ee87..15e10205b578 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -605,6 +605,7 @@ enum recovery_flags {
 	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
 	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
 	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
+	MD_RECOVERY_PERCPU,	/* nr_pending when faulty needs to be switched to atomic */
 	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
 };
 
@@ -886,6 +887,11 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
 	int faulty = test_bit(Faulty, &rdev->flags);
 	nr_pending_dec(rdev);
 	if (faulty) {
+		if (nr_pending_is_percpu_mode(rdev)) {
+			set_bit(MD_RECOVERY_PERCPU, &mddev->recovery);
+			md_wakeup_thread(mddev->thread);
+			return;
+		}
 		if (nr_pending_is_zero(rdev)) {
 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			md_wakeup_thread(mddev->thread);
--
2.34.1