[PATCH 08/15] stackdepot: rename next_pool_required to new_pool_required

From: andrey.konovalov
Date: Tue Aug 29 2023 - 13:13:49 EST


From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

Rename next_pool_required to new_pool_required.

This is purely a code readability change: the following patch will change
stack depot to store the pointer to the new pool in a separate variable,
and "new" seems like a more logical name.

Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
---
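
For context, stack depot's job is to deduplicate stack traces and hand back
compact handles. A typical in-kernel caller looks roughly like the sketch
below; the helper names (record_current_stack, print_recorded_stack) and the
16-entry buffer are illustrative only, error handling is elided, GFP_KERNEL
assumes a sleepable context, and stack_depot_init() (or
stack_depot_want_early_init()) is assumed to have run:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_current_stack(void)
{
	unsigned long entries[16];
	unsigned int nr;

	/* Capture the current call chain, skipping no extra frames. */
	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/*
	 * Deduplicate and store the trace. This is the path that checks
	 * new_pool_required and preallocates pool memory when needed.
	 */
	return stack_depot_save(entries, nr, GFP_KERNEL);
}

static void print_recorded_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr;

	/* Look the trace back up; a zero handle yields zero entries. */
	nr = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr, 0);
}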
lib/stackdepot.c | 45 ++++++++++++++++++++++-----------------------
1 file changed, 22 insertions(+), 23 deletions(-)
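
The handshake that new_pool_required participates in can be modeled in
userspace as below. This is a sketch only, not kernel code: C11 atomics stand
in for the kernel's READ_ONCE()/smp_load_acquire()/smp_store_release(), a
pthread mutex stands in for pool_lock, and MAX_POOLS, POOL_SIZE, pools,
keep_new_pool, and save_stack are placeholder names rather than the real
DEPOT_* constants and depot functions:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

#define MAX_POOLS 8             /* placeholder for DEPOT_MAX_POOLS */
#define POOL_SIZE 4096          /* placeholder for DEPOT_POOL_SIZE */

static void *pools[MAX_POOLS];  /* stands in for the pools array */
static int pool_index;          /* index of the currently used pool */
static atomic_int new_pool_required = 1;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models depot_keep_new_pool(); called with pool_lock held. */
static void keep_new_pool(void **prealloc)
{
	/*
	 * A relaxed read is enough under the lock; the kernel uses
	 * READ_ONCE() here only to mark the access as atomic.
	 */
	if (!atomic_load_explicit(&new_pool_required, memory_order_relaxed))
		return;

	/* Keep the preallocation unless the pool limit is reached. */
	if (pool_index + 1 < MAX_POOLS) {
		pools[pool_index + 1] = *prealloc;
		*prealloc = NULL;
	}

	/*
	 * Release pairs with the acquire in save_stack(): a reader that
	 * observes 0 also observes the kept pool.
	 */
	atomic_store_explicit(&new_pool_required, 0, memory_order_release);
}

/* Models the preallocation fast path of __stack_depot_save(). */
static void save_stack(void)
{
	void *prealloc = NULL;

	/* Allocate outside the lock, but only if the flag asks for it. */
	if (atomic_load_explicit(&new_pool_required, memory_order_acquire))
		prealloc = malloc(POOL_SIZE);

	pthread_mutex_lock(&pool_lock);
	if (prealloc)
		keep_new_pool(&prealloc);
	pthread_mutex_unlock(&pool_lock);

	/* Not kept (flag already clear or limit reached): give it back. */
	free(prealloc);
}

int main(void)
{
	save_stack();   /* keeps a pool and clears new_pool_required */
	save_stack();   /* sees 0 and skips the preallocation */
	return 0;
}

The acquire load lets the save path test the flag without taking pool_lock on
the fast path; only the actual bookkeeping happens under the lock.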

diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 869d520bc690..11934ea3b1c2 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -94,12 +94,11 @@ static size_t pool_offset;
static DEFINE_RAW_SPINLOCK(pool_lock);
/*
* Stack depot tries to keep an extra pool allocated even before it runs out
- * of space in the currently used pool.
- * This flag marks that this next extra pool needs to be allocated and
- * initialized. It has the value 0 when either the next pool is not yet
- * initialized or the limit on the number of pools is reached.
+ * of space in the currently used pool. This flag marks whether this extra pool
+ * needs to be allocated. It has the value 0 when either a new pool is already
+ * saved or the maximum number of pools is reached.
*/
-static int next_pool_required = 1;
+static int new_pool_required = 1;

static int __init disable_stack_depot(char *str)
{
@@ -220,20 +219,20 @@ int stack_depot_init(void)
}
EXPORT_SYMBOL_GPL(stack_depot_init);

-/* Keeps the preallocated memory to be used for the next stack depot pool. */
-static void depot_keep_next_pool(void **prealloc)
+/* Keeps the preallocated memory to be used for a new stack depot pool. */
+static void depot_keep_new_pool(void **prealloc)
{
/*
- * If the next pool is already saved or the maximum number of
+ * If a new pool is already saved or the maximum number of
* pools is reached, do not use the preallocated memory.
* READ_ONCE is only used to mark the variable as atomic;
* there are no concurrent writes.
*/
- if (!READ_ONCE(next_pool_required))
+ if (!READ_ONCE(new_pool_required))
return;

/*
- * Use the preallocated memory for the next pool
+ * Use the preallocated memory for the new pool
* as long as we do not exceed the maximum number of pools.
*/
if (pool_index + 1 < DEPOT_MAX_POOLS) {
@@ -242,12 +241,12 @@ static void depot_keep_next_pool(void **prealloc)
}

/*
- * At this point, either the next pool is kept or the maximum
+ * At this point, either a new pool is kept or the maximum
* number of pools is reached. In either case, take note that
* keeping another pool is not required.
* smp_store_release pairs with smp_load_acquire in stack_depot_save.
*/
- smp_store_release(&next_pool_required, 0);
+ smp_store_release(&new_pool_required, 0);
}

/* Updates references to the current and the next stack depot pools. */
@@ -262,7 +261,7 @@ static bool depot_update_pools(size_t required_size, void **prealloc)
}

/*
- * Move on to the next pool.
+ * Move on to the new pool.
* WRITE_ONCE pairs with potential concurrent read in
* stack_depot_fetch.
*/
@@ -271,12 +270,12 @@ static bool depot_update_pools(size_t required_size, void **prealloc)

/*
* If the maximum number of pools is not reached, take note
- * that the next pool needs to be initialized.
+ * that yet another new pool needs to be allocated.
* smp_store_release pairs with smp_load_acquire in
* stack_depot_save.
*/
if (pool_index + 1 < DEPOT_MAX_POOLS)
- smp_store_release(&next_pool_required, 1);
+ smp_store_release(&new_pool_required, 1);
}

/* Check if the current pool is not yet allocated. */
@@ -287,9 +286,9 @@ static bool depot_update_pools(size_t required_size, void **prealloc)
return true;
}

- /* Otherwise, try using the preallocated memory for the next pool. */
+ /* Otherwise, try using the preallocated memory for a new pool. */
if (*prealloc)
- depot_keep_next_pool(prealloc);
+ depot_keep_new_pool(prealloc);
return true;
}

@@ -300,7 +299,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
struct stack_record *stack;
size_t required_size = DEPOT_STACK_RECORD_SIZE;

- /* Update current and next pools if required and possible. */
+ /* Update current and new pools if required and possible. */
if (!depot_update_pools(required_size, prealloc))
return NULL;

@@ -432,13 +431,13 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
goto exit;

/*
- * Check if another stack pool needs to be initialized. If so, allocate
- * the memory now - we won't be able to do that under the lock.
+ * Check if another stack pool needs to be allocated. If so, allocate
+ * the memory now: we won't be able to do that under the lock.
*
* smp_load_acquire pairs with smp_store_release
- * in depot_update_pools and depot_keep_next_pool.
+ * in depot_update_pools and depot_keep_new_pool.
*/
- if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
+ if (unlikely(can_alloc && smp_load_acquire(&new_pool_required))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
@@ -471,9 +470,9 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
} else if (prealloc) {
/*
* Stack depot already contains this stack trace, but let's
* keep the preallocated memory for the future.
*/
- depot_keep_next_pool(&prealloc);
+ depot_keep_new_pool(&prealloc);
}

raw_spin_unlock_irqrestore(&pool_lock, flags);
--
2.25.1