[PATCH 01/31] rhashtable: Allow rhashtable to be used from irq-safe contexts

From: Tejun Heo
Date: Wed Nov 30 2022 - 03:23:51 EST


rhashtable currently only does bh-safe synchronization, making it
impossible to use from hardirq context or with irqs disabled. Switch it
to irq-safe synchronization to remove the restriction: the per-bucket
bit spinlocks are now taken under local_irq_save()/local_irq_restore()
instead of local_bh_disable()/local_bh_enable().
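
Concretely, the bucket lock helpers change as sketched below
(simplified from rht_lock()/rht_unlock(); the lockdep annotations are
omitted):

	/* before: bh-safe only */
	local_bh_disable();
	bit_spin_lock(0, (unsigned long *)bkt);
	/* ... bucket critical section ... */
	bit_spin_unlock(0, (unsigned long *)bkt);
	local_bh_enable();

	/* after: irq-safe */
	unsigned long flags;

	local_irq_save(flags);
	bit_spin_lock(0, (unsigned long *)bkt);
	/* ... bucket critical section ... */
	bit_spin_unlock(0, (unsigned long *)bkt);
	local_irq_restore(flags);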

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Reviewed-by: David Vernet <dvernet@xxxxxxxx>
Acked-by: Josh Don <joshdon@xxxxxxxxxx>
Acked-by: Hao Luo <haoluo@xxxxxxxxxx>
Acked-by: Barret Rhoden <brho@xxxxxxxxxx>
---
 include/linux/rhashtable.h | 51 ++++++++++++++++++++++----------------
 lib/rhashtable.c           | 16 +++++++-----
2 files changed, 39 insertions(+), 28 deletions(-)

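For illustration, a hypothetical hardirq-context caller (all names made
up, table assumed to have been set up with rhashtable_init() elsewhere)
could then do:

#include <linux/interrupt.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct my_entry {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params my_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct my_entry, key),
	.head_offset = offsetof(struct my_entry, node),
};

/* assume rhashtable_init(&my_table, &my_params) ran at probe time */
static struct rhashtable my_table;

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_entry *e;

	e = kzalloc(sizeof(*e), GFP_ATOMIC);
	if (e) {
		e->key = irq;
		/* Before this patch, taking the bucket lock here could
		 * deadlock against a bh-context holder on this CPU.
		 */
		if (rhashtable_insert_fast(&my_table, &e->node, my_params))
			kfree(e);	/* e.g. -ENOMEM or -EEXIST */
	}
	return IRQ_HANDLED;
}
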
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 68dab3e08aad..785fba3464f2 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -324,28 +324,31 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
*/

static inline void rht_lock(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bkt)
+ struct rhash_lock_head __rcu **bkt,
+ unsigned long *flags)
{
- local_bh_disable();
+ local_irq_save(*flags);
bit_spin_lock(0, (unsigned long *)bkt);
lock_map_acquire(&tbl->dep_map);
}

static inline void rht_lock_nested(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bucket,
- unsigned int subclass)
+ unsigned int subclass,
+ unsigned long *flags)
{
- local_bh_disable();
+ local_irq_save(*flags);
bit_spin_lock(0, (unsigned long *)bucket);
lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
}

static inline void rht_unlock(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bkt)
+ struct rhash_lock_head __rcu **bkt,
+ unsigned long *flags)
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
- local_bh_enable();
+ local_irq_restore(*flags);
}

static inline struct rhash_head *__rht_ptr(
@@ -393,7 +396,8 @@ static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,

static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
- struct rhash_head *obj)
+ struct rhash_head *obj,
+ unsigned long *flags)
{
if (rht_is_a_nulls(obj))
obj = NULL;
@@ -401,7 +405,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
__release(bitlock);
- local_bh_enable();
+ local_irq_restore(*flags);
}

/**
@@ -706,6 +710,7 @@ static inline void *__rhashtable_insert_fast(
struct rhash_head __rcu **pprev;
struct bucket_table *tbl;
struct rhash_head *head;
+ unsigned long flags;
unsigned int hash;
int elasticity;
void *data;
@@ -720,11 +725,11 @@ static inline void *__rhashtable_insert_fast(
if (!bkt)
goto out;
pprev = NULL;
- rht_lock(tbl, bkt);
+ rht_lock(tbl, bkt, &flags);

if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, &flags);
rcu_read_unlock();
return rhashtable_insert_slow(ht, key, obj);
}
@@ -756,9 +761,9 @@ static inline void *__rhashtable_insert_fast(
RCU_INIT_POINTER(list->rhead.next, head);
if (pprev) {
rcu_assign_pointer(*pprev, obj);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, &flags);
} else
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, &flags);
data = NULL;
goto out;
}
@@ -785,7 +790,7 @@ static inline void *__rhashtable_insert_fast(
}

atomic_inc(&ht->nelems);
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, &flags);

if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
@@ -797,7 +802,7 @@ static inline void *__rhashtable_insert_fast(
return data;

out_unlock:
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, &flags);
goto out;
}

@@ -991,6 +996,7 @@ static inline int __rhashtable_remove_fast_one(
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
+ unsigned long flags;
unsigned int hash;
int err = -ENOENT;

@@ -999,7 +1005,7 @@ static inline int __rhashtable_remove_fast_one(
if (!bkt)
return -ENOENT;
pprev = NULL;
- rht_lock(tbl, bkt);
+ rht_lock(tbl, bkt, &flags);

rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
struct rhlist_head *list;
@@ -1043,14 +1049,14 @@ static inline int __rhashtable_remove_fast_one(

if (pprev) {
rcu_assign_pointer(*pprev, obj);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, &flags);
} else {
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, &flags);
}
goto unlocked;
}

- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, &flags);
unlocked:
if (err > 0) {
atomic_dec(&ht->nelems);
@@ -1143,6 +1149,7 @@ static inline int __rhashtable_replace_fast(
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
+ unsigned long flags;
unsigned int hash;
int err = -ENOENT;

@@ -1158,7 +1165,7 @@ static inline int __rhashtable_replace_fast(
return -ENOENT;

pprev = NULL;
- rht_lock(tbl, bkt);
+ rht_lock(tbl, bkt, &flags);

rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
if (he != obj_old) {
@@ -1169,15 +1176,15 @@ static inline int __rhashtable_replace_fast(
rcu_assign_pointer(obj_new->next, obj_old->next);
if (pprev) {
rcu_assign_pointer(*pprev, obj_new);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, &flags);
} else {
- rht_assign_unlock(tbl, bkt, obj_new);
+ rht_assign_unlock(tbl, bkt, obj_new, &flags);
}
err = 0;
goto unlocked;
}

- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, &flags);

unlocked:
return err;
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e12bbfb240b8..9781572b2f31 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -231,6 +231,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
struct rhash_head *head, *next, *entry;
struct rhash_head __rcu **pprev = NULL;
unsigned int new_hash;
+ unsigned long flags;

if (new_tbl->nest)
goto out;
@@ -253,13 +254,14 @@ static int rhashtable_rehash_one(struct rhashtable *ht,

new_hash = head_hashfn(ht, new_tbl, entry);

- rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
+ rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
+ SINGLE_DEPTH_NESTING, &flags);

head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

RCU_INIT_POINTER(entry->next, head);

- rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
+ rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, &flags);

if (pprev)
rcu_assign_pointer(*pprev, next);
@@ -276,18 +278,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+ unsigned long flags;
int err;

if (!bkt)
return 0;
- rht_lock(old_tbl, bkt);
+ rht_lock(old_tbl, bkt, &flags);

while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
;

if (err == -ENOENT)
err = 0;
- rht_unlock(old_tbl, bkt);
+ rht_unlock(old_tbl, bkt, &flags);

return err;
}
@@ -590,6 +593,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
struct bucket_table *new_tbl;
struct bucket_table *tbl;
struct rhash_lock_head __rcu **bkt;
+ unsigned long flags;
unsigned int hash;
void *data;

@@ -607,7 +611,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
data = ERR_PTR(-EAGAIN);
} else {
- rht_lock(tbl, bkt);
+ rht_lock(tbl, bkt, &flags);
data = rhashtable_lookup_one(ht, bkt, tbl,
hash, key, obj);
new_tbl = rhashtable_insert_one(ht, bkt, tbl,
@@ -615,7 +619,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
if (PTR_ERR(new_tbl) != -EEXIST)
data = ERR_CAST(new_tbl);

- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, &flags);
}
} while (!IS_ERR_OR_NULL(new_tbl));

--
2.38.1