[PATCH 06/15] stackdepot: fix and clean-up atomic annotations

From: andrey.konovalov
Date: Tue Aug 29 2023 - 13:13:45 EST


From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

Simplify comments accompanying the use of atomic accesses in the
stack depot code.

Also turn the smp_load_acquire() of next_pool_required in
depot_init_pool() into READ_ONCE(), as both depot_init_pool() and all
the smp_store_release() writes to this variable are executed under the
stack depot lock.
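
As a minimal illustration of why the relaxed access is enough, here is
a userspace sketch using C11 atomics, with memory_order_relaxed standing
in for READ_ONCE() and acquire/release for smp_load_acquire()/
smp_store_release(); the function names only mirror the stack depot
code, this is not the actual implementation:

  #include <stdatomic.h>
  #include <pthread.h>

  static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
  static atomic_int next_pool_required = 1;

  /* Both this read and every write to next_pool_required happen
   * under pool_lock, so a relaxed (READ_ONCE-style) load suffices:
   * the lock provides the ordering, the atomic only avoids a data
   * race. */
  static void depot_init_pool_sketch(void)
  {
          pthread_mutex_lock(&pool_lock);
          if (atomic_load_explicit(&next_pool_required,
                                   memory_order_relaxed)) {
                  /* ... initialize the next pool ... */
                  atomic_store_explicit(&next_pool_required, 0,
                                        memory_order_release);
          }
          pthread_mutex_unlock(&pool_lock);
  }

  /* The lockless fast path does not hold pool_lock, so it still
   * needs an acquire load to pair with the release store above. */
  static int stack_depot_save_sketch(void)
  {
          return atomic_load_explicit(&next_pool_required,
                                      memory_order_acquire);
  }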

Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

---

This patch is not strictly required, as the atomic accesses are fully
removed in one of the later patches. However, I decided to keep the
patch in case we end up needing these atomics in future iterations of
this series.
---
lib/stackdepot.c | 27 +++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 93191ee70fc3..9ae71e1ef1a7 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -226,10 +226,10 @@ static void depot_init_pool(void **prealloc)
/*
* If the next pool is already initialized or the maximum number of
* pools is reached, do not use the preallocated memory.
- * smp_load_acquire() here pairs with smp_store_release() below and
- * in depot_alloc_stack().
+ * READ_ONCE is only used to mark the variable as atomic;
+ * there are no concurrent writes.
*/
- if (!smp_load_acquire(&next_pool_required))
+ if (!READ_ONCE(next_pool_required))
return;

/* Check if the current pool is not yet allocated. */
@@ -250,8 +250,8 @@ static void depot_init_pool(void **prealloc)
* At this point, either the next pool is initialized or the
* maximum number of pools is reached. In either case, take
* note that initializing another pool is not required.
- * This smp_store_release pairs with smp_load_acquire() above
- * and in stack_depot_save().
+ * smp_store_release pairs with smp_load_acquire in
+ * stack_depot_save.
*/
smp_store_release(&next_pool_required, 0);
}
@@ -275,15 +275,15 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
/*
* Move on to the next pool.
* WRITE_ONCE pairs with potential concurrent read in
- * stack_depot_fetch().
+ * stack_depot_fetch.
*/
WRITE_ONCE(pool_index, pool_index + 1);
pool_offset = 0;
/*
* If the maximum number of pools is not reached, take note
* that the next pool needs to be initialized.
- * smp_store_release() here pairs with smp_load_acquire() in
- * stack_depot_save() and depot_init_pool().
+ * smp_store_release pairs with smp_load_acquire in
+ * stack_depot_save.
*/
if (pool_index + 1 < DEPOT_MAX_POOLS)
smp_store_release(&next_pool_required, 1);
@@ -414,8 +414,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,

/*
* Fast path: look the stack trace up without locking.
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |bucket| below.
+ * smp_load_acquire pairs with smp_store_release to |bucket| below.
*/
found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
if (found)
@@ -425,8 +424,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* Check if another stack pool needs to be initialized. If so, allocate
* the memory now - we won't be able to do that under the lock.
*
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
+ * smp_load_acquire pairs with smp_store_release
+ * in depot_alloc_stack and depot_init_pool.
*/
if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
/*
@@ -452,8 +451,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
if (new) {
new->next = *bucket;
/*
- * This smp_store_release() pairs with
- * smp_load_acquire() from |bucket| above.
+ * smp_store_release pairs with smp_load_acquire
+ * from |bucket| above.
*/
smp_store_release(bucket, new);
found = new;
--
2.25.1