[RFC for-next v2 3/4] block/bio: add pcpu caching for non-polling bio_put

From: Pavel Begunkov
Date: Tue Oct 18 2022 - 15:53:02 EST


This patch extends REQ_ALLOC_CACHE to IRQ completions, whereas currently
it is limited to iopoll only. Instead of guarding the list with irq
toggling on alloc, which is expensive, it keeps an additional irq-safe
list from which bios are spliced into the main list in batches,
amortising the overhead. On the put side it does toggle irqs, but in
many cases they are already disabled, so the toggling is cheap.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
block/bio.c | 64 ++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 53 insertions(+), 11 deletions(-)
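
A quick illustration for reviewers, not for the commit message: the
scheme boils down to a consumer-private fast list plus a shared list
that is drained in batches. Below is a minimal userspace model of just
that cost structure. It is only a sketch: a pthread mutex stands in for
local_irq_save()/restore(), struct node stands in for struct bio, and
all names in it (cache_put, cache_get, cache_splice, THRESHOLD) are
invented for the example.

/*
 * Userspace model of the two-list percpu cache; not kernel code.
 * Build with: cc -o demo demo.c -lpthread
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define THRESHOLD 16

struct node {
	struct node *next;
};

struct cache {
	struct node *free_list;		/* consumer-only, lockless */
	struct node *free_list_irq;	/* filled from "irq" context */
	unsigned int nr, nr_irq;
	pthread_mutex_t lock;		/* models irq toggling */
};

/* put side: always pays for the protected section, as the patched
 * bio_put_percpu_cache() does for non-polled completions */
static void cache_put(struct cache *c, struct node *n)
{
	pthread_mutex_lock(&c->lock);
	n->next = c->free_list_irq;
	c->free_list_irq = n;
	c->nr_irq++;
	pthread_mutex_unlock(&c->lock);
}

/* splice the contended list into the fast list in one batch */
static void cache_splice(struct cache *c)
{
	assert(!c->free_list);	/* mirrors the WARN_ON_ONCE() */
	pthread_mutex_lock(&c->lock);
	c->free_list = c->free_list_irq;
	c->free_list_irq = NULL;
	c->nr += c->nr_irq;
	c->nr_irq = 0;
	pthread_mutex_unlock(&c->lock);
}

/* alloc side: lockless until the fast list runs dry, and even then it
 * splices only once enough entries accumulated; the kernel reads nr_irq
 * with READ_ONCE() since it peeks without disabling irqs */
static struct node *cache_get(struct cache *c)
{
	struct node *n;

	if (!c->free_list) {
		if (c->nr_irq < THRESHOLD)
			return NULL;
		cache_splice(c);
		if (!c->free_list)
			return NULL;
	}
	n = c->free_list;
	c->free_list = n->next;
	c->nr--;
	return n;
}

int main(void)
{
	struct cache c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct node nodes[2 * THRESHOLD];
	int i;

	for (i = 0; i < 2 * THRESHOLD; i++)
		cache_put(&c, &nodes[i]);
	/* one locked splice serves all subsequent lockless pops */
	for (i = 0; i < 2 * THRESHOLD; i++)
		assert(cache_get(&c));
	assert(!cache_get(&c));
	puts("ok");
	return 0;
}

The demo is single threaded; the point is the cost structure: every put
pays the (usually cheap) protection, while the alloc side takes it only
once per batch.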

diff --git a/block/bio.c b/block/bio.c
index ac16cc154476..c2dda2759df5 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -25,9 +25,15 @@
 #include "blk-rq-qos.h"
 #include "blk-cgroup.h"
 
+#define ALLOC_CACHE_THRESHOLD	16
+#define ALLOC_CACHE_SLACK	64
+#define ALLOC_CACHE_MAX		512
+
 struct bio_alloc_cache {
 	struct bio		*free_list;
+	struct bio		*free_list_irq;
 	unsigned int		nr;
+	unsigned int		nr_irq;
 };
 
 static struct biovec_slab {
@@ -408,6 +414,22 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
 }
 
+static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
+{
+	unsigned long flags;
+
+	/* cache->free_list must be empty */
+	if (WARN_ON_ONCE(cache->free_list))
+		return;
+
+	local_irq_save(flags);
+	cache->free_list = cache->free_list_irq;
+	cache->free_list_irq = NULL;
+	cache->nr += cache->nr_irq;
+	cache->nr_irq = 0;
+	local_irq_restore(flags);
+}
+
 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
 		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
 		struct bio_set *bs)
@@ -417,9 +439,17 @@ static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,

 	cache = per_cpu_ptr(bs->cache, get_cpu());
 	if (!cache->free_list) {
-		put_cpu();
-		return NULL;
+		if (READ_ONCE(cache->nr_irq) < ALLOC_CACHE_THRESHOLD) {
+			put_cpu();
+			return NULL;
+		}
+		bio_alloc_irq_cache_splice(cache);
+		if (!cache->free_list) {
+			put_cpu();
+			return NULL;
+		}
 	}
+
 	bio = cache->free_list;
 	cache->free_list = bio->bi_next;
 	cache->nr--;
@@ -676,11 +706,8 @@ void guard_bio_eod(struct bio *bio)
 	bio_truncate(bio, maxsector << 9);
 }
 
-#define ALLOC_CACHE_MAX 512
-#define ALLOC_CACHE_SLACK 64
-
-static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
-				  unsigned int nr)
+static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
+				   unsigned int nr)
 {
 	unsigned int i = 0;
 	struct bio *bio;
@@ -692,6 +719,17 @@ static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
 		if (++i == nr)
 			break;
 	}
+	return i;
+}
+
+static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
+				  unsigned int nr)
+{
+	nr -= __bio_alloc_cache_prune(cache, nr);
+	if (!READ_ONCE(cache->free_list)) {
+		bio_alloc_irq_cache_splice(cache);
+		__bio_alloc_cache_prune(cache, nr);
+	}
 }
 
 static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
@@ -728,6 +766,7 @@ static void bio_alloc_cache_destroy(struct bio_set *bs)
 static inline void bio_put_percpu_cache(struct bio *bio)
 {
 	struct bio_alloc_cache *cache;
+	unsigned long flags;
 
 	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
 	bio_uninit(bio);
@@ -737,12 +776,15 @@ static inline void bio_put_percpu_cache(struct bio *bio)
 		cache->free_list = bio;
 		cache->nr++;
 	} else {
-		put_cpu();
-		bio_free(bio);
-		return;
+		local_irq_save(flags);
+		bio->bi_next = cache->free_list_irq;
+		cache->free_list_irq = bio;
+		cache->nr_irq++;
+		local_irq_restore(flags);
 	}
 
-	if (cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
+	if (READ_ONCE(cache->nr_irq) + cache->nr >
+	    ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
 		bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
 	put_cpu();
 }
--
2.38.0