Re: [PATCH] lightnvm: generalize rrpc ppa calculations

From: Matias Bjørling
Date: Thu Feb 18 2016 - 02:57:54 EST


On 02/17/2016 12:00 PM, Javier González wrote:
> In rrpc, some calculations assume a certain configuration (e.g., 1 LUN,
> 1 sector per page). The reason is that we have used a simple QEMU
> configuration to test core features, both in LightNVM generally and in
> rrpc concretely. This patch relaxes these assumptions and generalizes
> the calculations in order to support real hardware. Note that more
> complex QEMU configurations, which make it possible to simulate such
> hardware, have also been pushed to the qemu-nvme repository
> implementing LightNVM support, available under the Open-Channel SSD
> project on GitHub [1].
>
> [1] https://github.com/OpenChannelSSD/qemu-nvme
>
> Signed-off-by: Javier González <javier@xxxxxxxxxxxx>
> ---
> drivers/lightnvm/rrpc.c | 45 ++++++++++++++++++++++++++++-----------------
> drivers/lightnvm/rrpc.h |  9 +++++++++
> 2 files changed, 37 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index 775bf6c2..f366c78 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -38,7 +38,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
>
> 	spin_lock(&rblk->lock);
>
> -	div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
> +	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
> 	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
> 	rblk->nr_invalid_pages++;
>
> @@ -113,14 +113,24 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
>
> static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
> {
> -	return (rblk->next_page == rrpc->dev->pgs_per_blk);
> +	return (rblk->next_page == rrpc->dev->sec_per_blk);
> }
>
> +/* Calculate relative addr for the given block, considering instantiated LUNs */
> +static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
> +{
> +	struct nvm_block *blk = rblk->parent;
> +	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
> +
> +	return lun_blk * rrpc->dev->sec_per_blk;
> +}
> +
> +/* Calculate global addr for the given block */
> static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
> {
> 	struct nvm_block *blk = rblk->parent;
>
> -	return blk->id * rrpc->dev->pgs_per_blk;
> +	return blk->id * rrpc->dev->sec_per_blk;
> }
>
> static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
> @@ -136,7 +146,7 @@ static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
> 	l.g.sec = secs;
>
> 	sector_div(ppa, dev->sec_per_pg);
> -	div_u64_rem(ppa, dev->sec_per_blk, &pgs);
> +	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
> 	l.g.pg = pgs;
>
> 	sector_div(ppa, dev->pgs_per_blk);
> @@ -191,12 +201,12 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
> 		return NULL;
> 	}
>
> -	rblk = &rlun->blocks[blk->id];
> +	rblk = rrpc_get_rblk(rlun, blk->id);
> 	list_add_tail(&rblk->list, &rlun->open_list);
> 	spin_unlock(&lun->lock);
>
> 	blk->priv = rblk;
> -	bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
> +	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
> 	rblk->next_page = 0;
> 	rblk->nr_invalid_pages = 0;
> 	atomic_set(&rblk->data_cmnt_size, 0);
> @@ -286,11 +296,11 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
> 	struct bio *bio;
> 	struct page *page;
> 	int slot;
> -	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
> +	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
> 	u64 phys_addr;
> 	DECLARE_COMPLETION_ONSTACK(wait);
>
> -	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
> +	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
> 		return 0;
>
> 	bio = bio_alloc(GFP_NOIO, 1);
> @@ -306,10 +316,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
> 	}
>
> 	while ((slot = find_first_zero_bit(rblk->invalid_pages,
> -					nr_pgs_per_blk)) < nr_pgs_per_blk) {
> +					nr_sec_per_blk)) < nr_sec_per_blk) {
>
> 		/* Lock laddr */
> -		phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
> +		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
>
> try:
> 		spin_lock(&rrpc->rev_lock);
> @@ -381,7 +391,7 @@ finished:
> 	mempool_free(page, rrpc->page_pool);
> 	bio_put(bio);
>
> -	if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
> +	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
> 		pr_err("nvm: failed to garbage collect block\n");
> 		return -EIO;
> 	}
> @@ -677,7 +687,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
> 		lun = rblk->parent->lun;
>
> 		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
> -		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
> +		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
> 			rrpc_run_gc(rrpc, rblk);
> 	}
> }
> @@ -1030,7 +1040,7 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
> 			continue;
>
> 		addr[i].addr = pba;
> -		raddr[pba].addr = slba + i;
> +		raddr[pba % rrpc->nr_sects].addr = slba + i;
> 	}
>
> 	return 0;
> @@ -1137,7 +1147,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
> 	struct rrpc_lun *rlun;
> 	int i, j;
>
> -	if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
> +	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
> 		pr_err("rrpc: number of pages per block too high.");
> 		return -EINVAL;
> 	}
> @@ -1238,10 +1248,11 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
> 	struct nvm_dev *dev = rrpc->dev;
> 	int offset;
> 	struct rrpc_addr *laddr;
> -	u64 paddr, pladdr;
> +	u64 bpaddr, paddr, pladdr;
>
> -	for (offset = 0; offset < dev->pgs_per_blk; offset++) {
> -		paddr = block_to_addr(rrpc, rblk) + offset;
> +	bpaddr = block_to_rel_addr(rrpc, rblk);
> +	for (offset = 0; offset < dev->sec_per_blk; offset++) {
> +		paddr = bpaddr + offset;
>
> 		pladdr = rrpc->rev_trans_map[paddr].addr;
> 		if (pladdr == ADDR_EMPTY)
> diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
> index 3989d65..855f4a5 100644
> --- a/drivers/lightnvm/rrpc.h
> +++ b/drivers/lightnvm/rrpc.h
> @@ -156,6 +156,15 @@ struct rrpc_rev_addr {
> 	u64 addr;
> };
>
> +static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
> +							int blk_id)
> +{
> +	struct rrpc *rrpc = rlun->rrpc;
> +	int lun_blk = blk_id % rrpc->dev->blks_per_lun;
> +
> +	return &rlun->blocks[lun_blk];
> +}
> +
> static inline sector_t rrpc_get_laddr(struct bio *bio)
> {
> 	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
>

Thanks Javier, applied.
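
For anyone following the address math: the effect of the generalization
is easy to check outside the kernel. Below is a minimal userspace sketch
of the two new helpers; the geometry numbers (BLKS_PER_LUN, NR_LUNS,
SEC_PER_BLK) are made up for illustration and would come from the
device's identify geometry in practice.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical geometry, for illustration only; real values come from
 * the device's identify data. */
#define BLKS_PER_LUN	1024
#define NR_LUNS		4
#define SEC_PER_BLK	256	/* pgs_per_blk * sec_per_pg */

/* Mirrors block_to_rel_addr(): first sector of a block, relative to
 * the address space spanned by the LUNs this rrpc instance owns. */
static uint64_t block_to_rel_addr(int blk_id)
{
	int lun_blk = blk_id % (BLKS_PER_LUN * NR_LUNS);

	return (uint64_t)lun_blk * SEC_PER_BLK;
}

/* Mirrors rrpc_get_rblk(): index of a block within its own LUN's
 * blocks[] array. */
static int rblk_index(int blk_id)
{
	return blk_id % BLKS_PER_LUN;
}

int main(void)
{
	int blk_id = 2 * BLKS_PER_LUN + 10;	/* block 10 on the third LUN */

	printf("rel sector addr: %llu\n",
	       (unsigned long long)block_to_rel_addr(blk_id));
	printf("index in blocks[]: %d\n", rblk_index(blk_id));
	return 0;
}

With the simple QEMU geometry (1 LUN, 1 sector per page) both helpers
collapse to blk_id * pgs_per_blk and blocks[blk_id], which is why the
old hard-coded arithmetic happened to work there but broke on real
hardware.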

-Matias