[PATCH 07/29] dm zoned: dynamically allocate the dm-zoned-meta shrinker

From: Qi Zheng
Date: Thu Jun 22 2023 - 04:56:13 EST

In preparation for implementing lockless slab shrink, we need to
dynamically allocate the dm-zoned-meta shrinker so that it can be
freed asynchronously using kfree_rcu(). This way, releasing the
struct dmz_metadata does not have to wait for an RCU read-side
critical section to complete.

Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
---
drivers/md/dm-zoned-metadata.c | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)
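
Note for reviewers: shrinker_alloc_and_init(), shrinker_free() and
unregister_and_free_shrinker() are helpers introduced earlier in this
series and are not in mainline yet. For reading this patch on its own,
here is a minimal sketch of the semantics assumed below; the exact
signatures and the ->private_data field come from the earlier patches,
so treat this as illustration rather than the final API:

	/*
	 * Sketch only: allocate a shrinker and fill in its callbacks
	 * and parameters. The ->private_data field (added by this
	 * series) is what lets the count/scan callbacks drop the
	 * container_of() pattern used with embedded shrinkers.
	 */
	struct shrinker *
	shrinker_alloc_and_init(unsigned long (*count)(struct shrinker *,
						       struct shrink_control *),
				unsigned long (*scan)(struct shrinker *,
						      struct shrink_control *),
				long batch, int seeks, unsigned int flags,
				void *priv_data)
	{
		struct shrinker *shrinker;

		shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
		if (!shrinker)
			return NULL;

		shrinker->count_objects = count;
		shrinker->scan_objects = scan;
		shrinker->batch = batch;
		shrinker->seeks = seeks;
		shrinker->flags = flags;
		shrinker->private_data = priv_data;
		return shrinker;
	}

	/* Sketch only: free a shrinker that was never registered. */
	void shrinker_free(struct shrinker *shrinker)
	{
		kfree(shrinker);
	}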

diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 9d3cca8e3dc9..41b10ffb968a 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -187,7 +187,7 @@ struct dmz_metadata {
struct rb_root mblk_rbtree;
struct list_head mblk_lru_list;
struct list_head mblk_dirty_list;
- struct shrinker mblk_shrinker;
+ struct shrinker *mblk_shrinker;

/* Zone allocation management */
struct mutex map_lock;
@@ -615,7 +615,7 @@ static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
+ struct dmz_metadata *zmd = shrink->private_data;

return atomic_read(&zmd->nr_mblks);
}
@@ -626,7 +626,7 @@ static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
+ struct dmz_metadata *zmd = shrink->private_data;
unsigned long count;

spin_lock(&zmd->mblk_lock);
@@ -2936,17 +2936,22 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
*/
zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
- zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
- zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
- zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
+
+ zmd->mblk_shrinker = shrinker_alloc_and_init(dmz_mblock_shrinker_count,
+ dmz_mblock_shrinker_scan,
+ 0, DEFAULT_SEEKS, 0, zmd);
+ if (!zmd->mblk_shrinker) {
+ dmz_zmd_err(zmd, "Allocate metadata cache shrinker failed");
+ goto err;
+ }

/* Metadata cache shrinker */
- ret = register_shrinker(&zmd->mblk_shrinker, "dm-zoned-meta:(%u:%u)",
+ ret = register_shrinker(zmd->mblk_shrinker, "dm-zoned-meta:(%u:%u)",
MAJOR(dev->bdev->bd_dev),
MINOR(dev->bdev->bd_dev));
if (ret) {
dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
- goto err;
+ goto err_shrinker;
}

dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
@@ -2982,6 +2987,8 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
*metadata = zmd;

return 0;
+err_shrinker:
+ shrinker_free(zmd->mblk_shrinker);
err:
dmz_cleanup_metadata(zmd);
kfree(zmd);
@@ -2995,7 +3002,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
*/
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
- unregister_shrinker(&zmd->mblk_shrinker);
+ unregister_and_free_shrinker(zmd->mblk_shrinker);
dmz_cleanup_metadata(zmd);
kfree(zmd);
}
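
Note: unregister_and_free_shrinker() (also introduced earlier in this
series) is what makes the "freed asynchronously" claim in the
changelog concrete. Roughly, and assuming struct shrinker gains an
rcu_head field named rcu in the earlier patches:

	void unregister_and_free_shrinker(struct shrinker *shrinker)
	{
		unregister_shrinker(shrinker);
		/*
		 * Defer the actual kfree() until after any in-flight
		 * RCU read-side critical sections, so that
		 * dmz_dtr_metadata() does not need an explicit
		 * synchronize_rcu() before it goes on to tear down the
		 * rest of struct dmz_metadata.
		 */
		kfree_rcu(shrinker, rcu);
	}
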
--
2.30.2