Re: [PATCH v9 02/13] block: rearrange bdev_{is_zoned,zone_sectors,get_queue} helpers in blkdev.h

From: Damien Le Moal
Date: Wed Aug 10 2022 - 12:50:33 EST


On 2022/08/03 2:47, Pankaj Raghav wrote:
> Define bdev_is_zoned(), bdev_zone_sectors() and bdev_get_queue() earlier
> in the blkdev.h include file.
>
> This commit has no functional change, and it is a prep patch for allowing
> zoned devices with non-power-of-2 zone sizes in the block layer.
>
> Signed-off-by: Pankaj Raghav <p.raghav@xxxxxxxxxxx>
> Suggested-by: Bart Van Assche <bvanassche@xxxxxxx>
> ---
> include/linux/blkdev.h | 48 +++++++++++++++++++++---------------------
> 1 file changed, 24 insertions(+), 24 deletions(-)
>
> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
> index ab82d1ff0cce..22f97427b60b 100644
> --- a/include/linux/blkdev.h
> +++ b/include/linux/blkdev.h
> @@ -635,6 +635,11 @@ static inline bool queue_is_mq(struct request_queue *q)
> return q->mq_ops;
> }
>
> +static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
> +{
> + return bdev->bd_queue; /* this is never NULL */
> +}
> +
> #ifdef CONFIG_PM
> static inline enum rpm_status queue_rpm_status(struct request_queue *q)
> {
> @@ -666,6 +671,25 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
> }
> }
>
> +static inline bool bdev_is_zoned(struct block_device *bdev)
> +{
> + struct request_queue *q = bdev_get_queue(bdev);
> +
> + if (q)
> + return blk_queue_is_zoned(q);

As noted in the comment in bdev_get_queue(), q is never NULL. So all of this
could be simplified to:

return blk_queue_is_zoned(bdev_get_queue(bdev));

This could be done in a separate patch, or here as well.
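
For reference, the relocated helper could then look like this (a sketch only,
assuming bdev_get_queue() indeed never returns NULL, as its comment states):

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	/* bdev->bd_queue is never NULL, so no need for a NULL check here */
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}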

> +
> + return false;
> +}
> +
> +static inline sector_t bdev_zone_sectors(struct block_device *bdev)
> +{
> + struct request_queue *q = bdev_get_queue(bdev);
> +
> + if (!blk_queue_is_zoned(q))
> + return 0;
> + return q->limits.chunk_sectors;
> +}
> +
> #ifdef CONFIG_BLK_DEV_ZONED
> static inline unsigned int disk_nr_zones(struct gendisk *disk)
> {
> @@ -892,11 +916,6 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
> int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
> unsigned int flags);
>
> -static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
> -{
> - return bdev->bd_queue; /* this is never NULL */
> -}
> -
> /* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
> const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
>
> @@ -1296,25 +1315,6 @@ static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
> return BLK_ZONED_NONE;
> }
>
> -static inline bool bdev_is_zoned(struct block_device *bdev)
> -{
> - struct request_queue *q = bdev_get_queue(bdev);
> -
> - if (q)
> - return blk_queue_is_zoned(q);
> -
> - return false;
> -}
> -
> -static inline sector_t bdev_zone_sectors(struct block_device *bdev)
> -{
> - struct request_queue *q = bdev_get_queue(bdev);
> -
> - if (!blk_queue_is_zoned(q))
> - return 0;
> - return q->limits.chunk_sectors;
> -}
> -
> static inline int queue_dma_alignment(const struct request_queue *q)
> {
> return q ? q->dma_alignment : 511;


--
Damien Le Moal
Western Digital Research