Diffstat (limited to 'drivers/lightnvm/rrpc.c')
-rw-r--r--  drivers/lightnvm/rrpc.c  |  514
1 file changed, 333 insertions(+), 181 deletions(-)
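The core of the change: rrpc stops asking the media manager for blocks (nvm_get_blk/nvm_put_blk) and instead keeps its own per-LUN free/used/bad lists under the LUN lock, with a small reserve held back for garbage collection. Below is a minimal userspace sketch of that allocation pattern; struct blk, struct lun, get_blk() and put_blk() are simplified stand-ins for illustration, not the kernel's types or the code in this diff.

#include <pthread.h>
#include <stdio.h>

struct blk {
	int id;
	struct blk *next;	/* links the per-LUN free list */
};

struct lun {
	pthread_mutex_t lock;
	struct blk *free_list;
	int nr_free_blocks;
	int reserved_blocks;	/* held back for GC, as in rrpc */
};

/* Hand out a free block, or NULL. Mirrors the is_gc check this diff
 * adds to rrpc_get_blk(): non-GC callers are refused once the free
 * count falls below the GC reserve. */
static struct blk *get_blk(struct lun *lun, int is_gc)
{
	struct blk *b = NULL;

	pthread_mutex_lock(&lun->lock);
	if (!is_gc && lun->nr_free_blocks < lun->reserved_blocks)
		goto out;

	b = lun->free_list;
	if (b) {
		lun->free_list = b->next;
		lun->nr_free_blocks--;
	}
out:
	pthread_mutex_unlock(&lun->lock);
	return b;
}

/* Return a block to the free list, as rrpc_put_blk() does for blocks
 * still in the NVM_BLK_ST_TGT state. */
static void put_blk(struct lun *lun, struct blk *b)
{
	pthread_mutex_lock(&lun->lock);
	b->next = lun->free_list;
	lun->free_list = b;
	lun->nr_free_blocks++;
	pthread_mutex_unlock(&lun->lock);
}

int main(void)
{
	struct blk blocks[4];
	struct lun lun = { PTHREAD_MUTEX_INITIALIZER, NULL, 0, 2 };
	struct blk *b;
	int i;

	for (i = 0; i < 4; i++) {
		blocks[i].id = i;
		put_blk(&lun, &blocks[i]);
	}

	/* Hands out blocks LIFO until the free count drops below the
	 * reserve; the remainder stays available to GC only. */
	while ((b = get_blk(&lun, 0)))
		printf("got blk %d\n", b->id);

	printf("blocks left for GC: %d\n", lun.nr_free_blocks);
	return 0;
}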
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 37fcaadbf80c..9fb7de395915 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -28,6 +28,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block *rblk = a->rblk;
unsigned int pg_offset;
@@ -38,13 +39,13 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
spin_lock(&rblk->lock);
- div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
+ div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
rblk->nr_invalid_pages++;
spin_unlock(&rblk->lock);
- rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
+ rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
}
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
@@ -116,62 +117,35 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- return (rblk->next_page == rrpc->dev->sec_per_blk);
+ struct nvm_tgt_dev *dev = rrpc->dev;
+
+ return (rblk->next_page == dev->geo.sec_per_blk);
}
/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct nvm_block *blk = rblk->parent;
- int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
-
- return lun_blk * rrpc->dev->sec_per_blk;
-}
-
-/* Calculate global addr for the given block */
-static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_block *blk = rblk->parent;
-
- return blk->id * rrpc->dev->sec_per_blk;
-}
-
-static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = r.ppa;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, dev->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, dev->sec_per_pg);
- div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, dev->pgs_per_blk);
- div_u64_rem(ppa, dev->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, dev->blks_per_lun);
- div_u64_rem(ppa, dev->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, dev->luns_per_chnl);
- l.g.ch = ppa;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun = rblk->rlun;
- return l;
+ return rlun->id * dev->geo.sec_per_blk;
}
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
+static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
+ struct rrpc_addr *gp)
{
+ struct rrpc_block *rblk = gp->rblk;
+ struct rrpc_lun *rlun = rblk->rlun;
+ u64 addr = gp->addr;
struct ppa_addr paddr;
paddr.ppa = addr;
- return linear_to_generic_addr(dev, paddr);
+ paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
+ paddr.g.ch = rlun->bppa.g.ch;
+ paddr.g.lun = rlun->bppa.g.lun;
+ paddr.g.blk = rblk->id;
+
+ return paddr;
}
/* requires lun->lock taken */
@@ -188,21 +162,47 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
*cur_rblk = new_rblk;
}
+static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
+ struct rrpc_lun *rlun)
+{
+ struct rrpc_block *rblk = NULL;
+
+ if (list_empty(&rlun->free_list))
+ goto out;
+
+ rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
+
+ list_move_tail(&rblk->list, &rlun->used_list);
+ rblk->state = NVM_BLK_ST_TGT;
+ rlun->nr_free_blocks--;
+
+out:
+ return rblk;
+}
+
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
unsigned long flags)
{
- struct nvm_block *blk;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block *rblk;
+ int is_gc = flags & NVM_IOTYPE_GC;
+
+ spin_lock(&rlun->lock);
+ if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
+ pr_err("nvm: rrpc: cannot give block to non GC request\n");
+ spin_unlock(&rlun->lock);
+ return NULL;
+ }
- blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
- if (!blk) {
- pr_err("nvm: rrpc: cannot get new block from media manager\n");
+ rblk = __rrpc_get_blk(rrpc, rlun);
+ if (!rblk) {
+ pr_err("nvm: rrpc: cannot get new block\n");
+ spin_unlock(&rlun->lock);
return NULL;
}
+ spin_unlock(&rlun->lock);
- rblk = rrpc_get_rblk(rlun, blk->id);
- blk->priv = rblk;
- bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
+ bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
rblk->next_page = 0;
rblk->nr_invalid_pages = 0;
atomic_set(&rblk->data_cmnt_size, 0);
@@ -212,7 +212,24 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- nvm_put_blk(rrpc->dev, rblk->parent);
+ struct rrpc_lun *rlun = rblk->rlun;
+
+ spin_lock(&rlun->lock);
+ if (rblk->state & NVM_BLK_ST_TGT) {
+ list_move_tail(&rblk->list, &rlun->free_list);
+ rlun->nr_free_blocks++;
+ rblk->state = NVM_BLK_ST_FREE;
+ } else if (rblk->state & NVM_BLK_ST_BAD) {
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ rblk->state = NVM_BLK_ST_BAD;
+ } else {
+ WARN_ON_ONCE(1);
+ pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id, rblk->state);
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ }
+ spin_unlock(&rlun->lock);
}
static void rrpc_put_blks(struct rrpc *rrpc)
@@ -280,13 +297,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
*/
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct request_queue *q = rrpc->dev->q;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct request_queue *q = dev->q;
struct rrpc_rev_addr *rev;
struct nvm_rq *rqd;
struct bio *bio;
struct page *page;
int slot;
- int nr_sec_per_blk = rrpc->dev->sec_per_blk;
+ int nr_sec_per_blk = dev->geo.sec_per_blk;
u64 phys_addr;
DECLARE_COMPLETION_ONSTACK(wait);
@@ -309,12 +327,12 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
nr_sec_per_blk)) < nr_sec_per_blk) {
/* Lock laddr */
- phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
+ phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
try:
spin_lock(&rrpc->rev_lock);
/* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
+ rev = &rrpc->rev_trans_map[phys_addr];
/* already updated by previous regular write */
if (rev->addr == ADDR_EMPTY) {
spin_unlock(&rrpc->rev_lock);
@@ -396,15 +414,23 @@ static void rrpc_block_gc(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct ppa_addr ppa;
mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
+ pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
if (rrpc_move_valid_pages(rrpc, rblk))
goto put_back;
- if (nvm_erase_blk(dev, rblk->parent))
+ ppa.ppa = 0;
+ ppa.g.ch = rlun->bppa.g.ch;
+ ppa.g.lun = rlun->bppa.g.lun;
+ ppa.g.blk = rblk->id;
+
+ if (nvm_erase_blk(dev, &ppa, 0))
goto put_back;
rrpc_put_blk(rrpc, rblk);
@@ -420,7 +446,7 @@ put_back:
/* the block with the highest number of invalid pages will be at the beginning
 * of the list
*/
-static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
+static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
struct rrpc_block *rb)
{
if (ra->nr_invalid_pages == rb->nr_invalid_pages)
@@ -435,13 +461,13 @@ static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
struct list_head *prio_list = &rlun->prio_list;
- struct rrpc_block *rblock, *max;
+ struct rrpc_block *rblk, *max;
BUG_ON(list_empty(prio_list));
max = list_first_entry(prio_list, struct rrpc_block, prio);
- list_for_each_entry(rblock, prio_list, prio)
- max = rblock_max_invalid(max, rblock);
+ list_for_each_entry(rblk, prio_list, prio)
+ max = rblk_max_invalid(max, rblk);
return max;
}
@@ -450,36 +476,37 @@ static void rrpc_lun_gc(struct work_struct *work)
{
struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
struct rrpc *rrpc = rlun->rrpc;
- struct nvm_lun *lun = rlun->parent;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_block_gc *gcb;
unsigned int nr_blocks_need;
- nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
+ nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
if (nr_blocks_need < rrpc->nr_luns)
nr_blocks_need = rrpc->nr_luns;
spin_lock(&rlun->lock);
- while (nr_blocks_need > lun->nr_free_blocks &&
+ while (nr_blocks_need > rlun->nr_free_blocks &&
!list_empty(&rlun->prio_list)) {
- struct rrpc_block *rblock = block_prio_find_max(rlun);
- struct nvm_block *block = rblock->parent;
+ struct rrpc_block *rblk = block_prio_find_max(rlun);
- if (!rblock->nr_invalid_pages)
+ if (!rblk->nr_invalid_pages)
break;
gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
if (!gcb)
break;
- list_del_init(&rblock->prio);
+ list_del_init(&rblk->prio);
- BUG_ON(!block_is_full(rrpc, rblock));
+ WARN_ON(!block_is_full(rrpc, rblk));
- pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
+ pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
gcb->rrpc = rrpc;
- gcb->rblk = rblock;
+ gcb->rblk = rblk;
INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
queue_work(rrpc->kgc_wq, &gcb->ws_gc);
@@ -504,8 +531,9 @@ static void rrpc_gc_queue(struct work_struct *work)
spin_unlock(&rlun->lock);
mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
- rblk->parent->id);
+ pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
+ rlun->bppa.g.ch, rlun->bppa.g.lun,
+ rblk->id);
}
static const struct block_device_operations rrpc_fops = {
@@ -529,8 +557,7 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
* estimate.
*/
rrpc_for_each_lun(rrpc, rlun, i) {
- if (rlun->parent->nr_free_blocks >
- max_free->parent->nr_free_blocks)
+ if (rlun->nr_free_blocks > max_free->nr_free_blocks)
max_free = rlun;
}
@@ -553,7 +580,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
gp->addr = paddr;
gp->rblk = rblk;
- rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
+ rev = &rrpc->rev_trans_map[gp->addr];
rev->addr = laddr;
spin_unlock(&rrpc->rev_lock);
@@ -568,7 +595,7 @@ static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
if (block_is_full(rrpc, rblk))
goto out;
- addr = block_to_addr(rrpc, rblk) + rblk->next_page;
+ addr = rblk->next_page;
rblk->next_page++;
out:
@@ -582,20 +609,22 @@ out:
- * Returns rrpc_addr with the physical address and block. Returns NULL if no
- * blocks in the next rlun are available.
+ * Returns a global ppa_addr for the mapped page, or ADDR_EMPTY if no
+ * blocks in the next rlun are available.
*/
-static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
+static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
int is_gc)
{
+ struct nvm_tgt_dev *tgt_dev = rrpc->dev;
struct rrpc_lun *rlun;
struct rrpc_block *rblk, **cur_rblk;
- struct nvm_lun *lun;
+ struct rrpc_addr *p;
+ struct ppa_addr ppa;
u64 paddr;
int gc_force = 0;
+ ppa.ppa = ADDR_EMPTY;
rlun = rrpc_get_lun_rr(rrpc, is_gc);
- lun = rlun->parent;
- if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
- return NULL;
+ if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
+ return ppa;
/*
* page allocation steps:
@@ -652,10 +681,15 @@ new_blk:
}
pr_err("rrpc: failed to allocate new block\n");
- return NULL;
+ return ppa;
done:
spin_unlock(&rlun->lock);
- return rrpc_update_map(rrpc, laddr, rblk, paddr);
+ p = rrpc_update_map(rrpc, laddr, rblk, paddr);
+ if (!p)
+ return ppa;
+
+ /* return global address */
+ return rrpc_ppa_to_gaddr(tgt_dev, p);
}
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -675,21 +709,70 @@ static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}
+static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
+{
+ struct rrpc_lun *rlun = NULL;
+ int i;
+
+ for (i = 0; i < rrpc->nr_luns; i++) {
+ if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
+ rrpc->luns[i].bppa.g.lun == p.g.lun) {
+ rlun = &rrpc->luns[i];
+ break;
+ }
+ }
+
+ return rlun;
+}
+
+static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
+{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct rrpc_lun *rlun;
+ struct rrpc_block *rblk;
+
+ rlun = rrpc_ppa_to_lun(rrpc, ppa);
+ rblk = &rlun->blocks[ppa.g.blk];
+ rblk->state = NVM_BLK_ST_BAD;
+
+ nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
+}
+
+static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
+{
+ void *comp_bits = &rqd->ppa_status;
+ struct ppa_addr ppa, prev_ppa;
+ int nr_ppas = rqd->nr_ppas;
+ int bit;
+
+ if (rqd->nr_ppas == 1) {
+ __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
+ return;
+ }
+
+ ppa_set_empty(&prev_ppa);
+ bit = -1;
+ while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
+ ppa = rqd->ppa_list[bit];
+ if (ppa_cmp_blk(ppa, prev_ppa))
+ continue;
+
+ __rrpc_mark_bad_block(rrpc, ppa);
+ }
+}
+
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
sector_t laddr, uint8_t npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_addr *p;
struct rrpc_block *rblk;
- struct nvm_lun *lun;
int cmnt_size, i;
for (i = 0; i < npages; i++) {
p = &rrpc->trans_map[laddr + i];
rblk = p->rblk;
- lun = rblk->parent->lun;
cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
+ if (unlikely(cmnt_size == dev->geo.sec_per_blk))
rrpc_run_gc(rrpc, rblk);
}
}
@@ -697,12 +780,17 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
static void rrpc_end_io(struct nvm_rq *rqd)
{
struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
uint8_t npages = rqd->nr_ppas;
sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
- if (bio_data_dir(rqd->bio) == WRITE)
+ if (bio_data_dir(rqd->bio) == WRITE) {
+ if (rqd->error == NVM_RSP_ERR_FAILWRITE)
+ rrpc_mark_bad_block(rrpc, rqd);
+
rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+ }
bio_put(rqd->bio);
@@ -712,7 +800,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
rrpc_unlock_rq(rrpc, rqd);
if (npages > 1)
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
mempool_free(rqd, rrpc->rq_pool);
}
@@ -720,6 +808,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, int npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
struct rrpc_addr *gp;
sector_t laddr = rrpc_get_laddr(bio);
@@ -727,7 +816,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
int i;
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
return NVM_IO_REQUEUE;
}
@@ -737,12 +826,11 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
gp = &rrpc->trans_map[laddr + i];
if (gp->rblk) {
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
- gp->addr);
+ rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
} else {
BUG_ON(is_gc);
rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
rqd->dma_ppa_list);
return NVM_IO_DONE;
}
@@ -756,7 +844,6 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
unsigned long flags)
{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
int is_gc = flags & NVM_IOTYPE_GC;
sector_t laddr = rrpc_get_laddr(bio);
struct rrpc_addr *gp;
@@ -768,7 +855,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
gp = &rrpc->trans_map[laddr];
if (gp->rblk) {
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
+ rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
} else {
BUG_ON(is_gc);
rrpc_unlock_rq(rrpc, rqd);
@@ -776,7 +863,6 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
}
rqd->opcode = NVM_OP_HBREAD;
- rrqd->addr = gp;
return NVM_IO_OK;
}
@@ -784,31 +870,31 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, int npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *p;
+ struct ppa_addr p;
sector_t laddr = rrpc_get_laddr(bio);
int is_gc = flags & NVM_IOTYPE_GC;
int i;
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
return NVM_IO_REQUEUE;
}
for (i = 0; i < npages; i++) {
/* We assume that mapping occurs at 4KB granularity */
p = rrpc_map_page(rrpc, laddr + i, is_gc);
- if (!p) {
+ if (p.ppa == ADDR_EMPTY) {
BUG_ON(is_gc);
rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
rqd->dma_ppa_list);
rrpc_gc_kick(rrpc);
return NVM_IO_REQUEUE;
}
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
- p->addr);
+ rqd->ppa_list[i] = p;
}
rqd->opcode = NVM_OP_HBWRITE;
@@ -819,8 +905,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags)
{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- struct rrpc_addr *p;
+ struct ppa_addr p;
int is_gc = flags & NVM_IOTYPE_GC;
sector_t laddr = rrpc_get_laddr(bio);
@@ -828,16 +913,15 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
return NVM_IO_REQUEUE;
p = rrpc_map_page(rrpc, laddr, is_gc);
- if (!p) {
+ if (p.ppa == ADDR_EMPTY) {
BUG_ON(is_gc);
rrpc_unlock_rq(rrpc, rqd);
rrpc_gc_kick(rrpc);
return NVM_IO_REQUEUE;
}
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
+ rqd->ppa_addr = p;
rqd->opcode = NVM_OP_HBWRITE;
- rrqd->addr = p;
return NVM_IO_OK;
}
@@ -845,8 +929,10 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+
if (npages > 1) {
- rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
+ rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
&rqd->dma_ppa_list);
if (!rqd->ppa_list) {
pr_err("rrpc: not able to allocate ppa list\n");
@@ -869,14 +955,15 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
struct nvm_rq *rqd, unsigned long flags)
{
- int err;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
uint8_t nr_pages = rrpc_get_pages(bio);
int bio_size = bio_sectors(bio) << 9;
+ int err;
- if (bio_size < rrpc->dev->sec_size)
+ if (bio_size < dev->geo.sec_size)
return NVM_IO_ERR;
- else if (bio_size > rrpc->dev->max_rq_size)
+ else if (bio_size > dev->geo.max_rq_size)
return NVM_IO_ERR;
err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
@@ -889,15 +976,15 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
rqd->nr_ppas = nr_pages;
rrq->flags = flags;
- err = nvm_submit_io(rrpc->dev, rqd);
+ err = nvm_submit_io(dev, rqd);
if (err) {
pr_err("rrpc: I/O submission failed: %d\n", err);
bio_put(bio);
if (!(flags & NVM_IOTYPE_GC)) {
rrpc_unlock_rq(rrpc, rqd);
if (rqd->nr_ppas > 1)
- nvm_dev_dma_free(rrpc->dev,
- rqd->ppa_list, rqd->dma_ppa_list);
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list,
+ rqd->dma_ppa_list);
}
return NVM_IO_ERR;
}
@@ -911,6 +998,8 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
struct nvm_rq *rqd;
int err;
+ blk_queue_split(q, &bio, q->bio_split);
+
if (bio_op(bio) == REQ_OP_DISCARD) {
rrpc_discard(rrpc, bio);
return BLK_QC_T_NONE;
@@ -997,25 +1086,24 @@ static void rrpc_map_free(struct rrpc *rrpc)
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
struct rrpc *rrpc = (struct rrpc *)private;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_addr *addr = rrpc->trans_map + slba;
struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- u64 elba = slba + nlb;
+ struct rrpc_lun *rlun;
+ struct rrpc_block *rblk;
u64 i;
- if (unlikely(elba > dev->total_secs)) {
- pr_err("nvm: L2P data from device is out of bounds!\n");
- return -EINVAL;
- }
-
for (i = 0; i < nlb; i++) {
+ struct ppa_addr gaddr;
u64 pba = le64_to_cpu(entries[i]);
unsigned int mod;
+
/* LNVM treats address-spaces as silos, LBA and PBA are
* equally large and zero-indexed.
*/
if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
pr_err("nvm: L2P data entry is out of bounds!\n");
+ pr_err("nvm: Maybe loaded an old target L2P\n");
return -EINVAL;
}
@@ -1028,7 +1116,27 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
div_u64_rem(pba, rrpc->nr_sects, &mod);
+ gaddr = rrpc_recov_addr(dev, pba);
+ rlun = rrpc_ppa_to_lun(rrpc, gaddr);
+ if (!rlun) {
+ pr_err("rrpc: l2p corruption on lba %llu\n",
+ slba + i);
+ return -EINVAL;
+ }
+
+ rblk = &rlun->blocks[gaddr.g.blk];
+ if (!rblk->state) {
+ /* at this point, we don't know anything about the
+ * block. It's up to the FTL on top to re-establish the
+ * block state. The block is assumed to be open.
+ */
+ list_move_tail(&rblk->list, &rlun->used_list);
+ rblk->state = NVM_BLK_ST_TGT;
+ rlun->nr_free_blocks--;
+ }
+
addr[i].addr = pba;
+ addr[i].rblk = rblk;
raddr[mod].addr = slba + i;
}
@@ -1037,7 +1145,7 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
static int rrpc_map_init(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
sector_t i;
int ret;
@@ -1058,12 +1166,9 @@ static int rrpc_map_init(struct rrpc *rrpc)
r->addr = ADDR_EMPTY;
}
- if (!dev->ops->get_l2p_tbl)
- return 0;
-
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
- rrpc_l2p_update, rrpc);
+ ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
+ rrpc_l2p_update, rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1102,7 +1207,7 @@ static int rrpc_core_init(struct rrpc *rrpc)
if (!rrpc->page_pool)
return -ENOMEM;
- rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
+ rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
rrpc_gcb_cache);
if (!rrpc->gcb_pool)
return -ENOMEM;
@@ -1126,8 +1231,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
static void rrpc_luns_free(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvm_lun *lun;
struct rrpc_lun *rlun;
int i;
@@ -1136,23 +1239,74 @@ static void rrpc_luns_free(struct rrpc *rrpc)
for (i = 0; i < rrpc->nr_luns; i++) {
rlun = &rrpc->luns[i];
- lun = rlun->parent;
- if (!lun)
- break;
- dev->mt->release_lun(dev, lun->id);
vfree(rlun->blocks);
}
kfree(rrpc->luns);
}
-static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
+static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct rrpc_block *rblk;
+ struct ppa_addr ppa;
+ u8 *blks;
+ int nr_blks;
+ int i;
+ int ret;
+
+ if (!dev->parent->ops->get_bb_tbl)
+ return 0;
+
+ nr_blks = geo->blks_per_lun * geo->plane_mode;
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
+
+ ppa.ppa = 0;
+ ppa.g.ch = rlun->bppa.g.ch;
+ ppa.g.lun = rlun->bppa.g.lun;
+
+ ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
+ if (ret) {
+ pr_err("rrpc: could not get BB table\n");
+ goto out;
+ }
+
+ nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
+ if (nr_blks < 0) {
+ ret = nr_blks;
+ goto out;
+ }
+
+ for (i = 0; i < nr_blks; i++) {
+ if (blks[i] == NVM_BLK_T_FREE)
+ continue;
+
+ rblk = &rlun->blocks[i];
+ list_move_tail(&rblk->list, &rlun->bb_list);
+ rblk->state = NVM_BLK_ST_BAD;
+ rlun->nr_free_blocks--;
+ }
+
+out:
+ kfree(blks);
+ return ret;
+}
+
+static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
+{
+ rlun->bppa.ppa = 0;
+ rlun->bppa.g.ch = ppa.g.ch;
+ rlun->bppa.g.lun = ppa.g.lun;
+}
+
+static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
+{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
struct rrpc_lun *rlun;
int i, j, ret = -EINVAL;
- if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+ if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
pr_err("rrpc: number of pages per block too high.");
return -EINVAL;
}
@@ -1166,43 +1320,46 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
/* 1:1 mapping */
for (i = 0; i < rrpc->nr_luns; i++) {
- int lunid = lun_begin + i;
- struct nvm_lun *lun;
-
- if (dev->mt->reserve_lun(dev, lunid)) {
- pr_err("rrpc: lun %u is already allocated\n", lunid);
- goto err;
- }
-
- lun = dev->mt->get_lun(dev, lunid);
- if (!lun)
- goto err;
-
rlun = &rrpc->luns[i];
- rlun->parent = lun;
+ rlun->id = i;
+ rrpc_set_lun_ppa(rlun, luns[i]);
rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- rrpc->dev->blks_per_lun);
+ geo->blks_per_lun);
if (!rlun->blocks) {
ret = -ENOMEM;
goto err;
}
- for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
+ INIT_LIST_HEAD(&rlun->free_list);
+ INIT_LIST_HEAD(&rlun->used_list);
+ INIT_LIST_HEAD(&rlun->bb_list);
+
+ for (j = 0; j < geo->blks_per_lun; j++) {
struct rrpc_block *rblk = &rlun->blocks[j];
- struct nvm_block *blk = &lun->blocks[j];
- rblk->parent = blk;
+ rblk->id = j;
rblk->rlun = rlun;
+ rblk->state = NVM_BLK_T_FREE;
INIT_LIST_HEAD(&rblk->prio);
+ INIT_LIST_HEAD(&rblk->list);
spin_lock_init(&rblk->lock);
+
+ list_add_tail(&rblk->list, &rlun->free_list);
}
rlun->rrpc = rrpc;
+ rlun->nr_free_blocks = geo->blks_per_lun;
+ rlun->reserved_blocks = 2; /* for GC only */
+
INIT_LIST_HEAD(&rlun->prio_list);
INIT_LIST_HEAD(&rlun->wblk_list);
INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
spin_lock_init(&rlun->lock);
+
+ if (rrpc_bb_discovery(dev, rlun))
+ goto err;
+
}
return 0;
@@ -1213,27 +1370,25 @@ err:
/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvmm_type *mt = dev->mt;
- sector_t size = rrpc->nr_sects * dev->sec_size;
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ sector_t size = rrpc->nr_sects * dev->geo.sec_size;
int ret;
size >>= 9;
- ret = mt->get_area(dev, begin, size);
+ ret = nvm_get_area(dev, begin, size);
if (!ret)
- *begin >>= (ilog2(dev->sec_size) - 9);
+ *begin >>= (ilog2(dev->geo.sec_size) - 9);
return ret;
}
static void rrpc_area_free(struct rrpc *rrpc)
{
- struct nvm_dev *dev = rrpc->dev;
- struct nvmm_type *mt = dev->mt;
- sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
- mt->put_area(dev, begin);
+ nvm_put_area(dev, begin);
}
static void rrpc_free(struct rrpc *rrpc)
@@ -1262,11 +1417,11 @@ static void rrpc_exit(void *private)
static sector_t rrpc_capacity(void *private)
{
struct rrpc *rrpc = private;
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
sector_t reserved, provisioned;
/* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
+ reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
provisioned = rrpc->nr_sects - reserved;
if (reserved > rrpc->nr_sects) {
@@ -1285,13 +1440,13 @@ static sector_t rrpc_capacity(void *private)
*/
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct nvm_dev *dev = rrpc->dev;
+ struct nvm_tgt_dev *dev = rrpc->dev;
int offset;
struct rrpc_addr *laddr;
u64 bpaddr, paddr, pladdr;
bpaddr = block_to_rel_addr(rrpc, rblk);
- for (offset = 0; offset < dev->sec_per_blk; offset++) {
+ for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
paddr = bpaddr + offset;
pladdr = rrpc->rev_trans_map[paddr].addr;
@@ -1311,6 +1466,7 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
static int rrpc_blocks_init(struct rrpc *rrpc)
{
+ struct nvm_tgt_dev *dev = rrpc->dev;
struct rrpc_lun *rlun;
struct rrpc_block *rblk;
int lun_iter, blk_iter;
@@ -1318,7 +1474,7 @@ static int rrpc_blocks_init(struct rrpc *rrpc)
for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
rlun = &rrpc->luns[lun_iter];
- for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
+ for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
blk_iter++) {
rblk = &rlun->blocks[blk_iter];
rrpc_block_map_update(rrpc, rblk);
@@ -1357,11 +1513,11 @@ err:
static struct nvm_tgt_type tt_rrpc;
-static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
- int lun_begin, int lun_end)
+static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
{
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
+ struct nvm_geo *geo = &dev->geo;
struct rrpc *rrpc;
sector_t soffset;
int ret;
@@ -1384,9 +1540,8 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
spin_lock_init(&rrpc->bio_lock);
INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
- rrpc->nr_luns = lun_end - lun_begin + 1;
- rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
- rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
+ rrpc->nr_luns = geo->nr_luns;
+ rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
@@ -1398,15 +1553,12 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
}
rrpc->soffset = soffset;
- ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
+ ret = rrpc_luns_init(rrpc, dev->luns);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
goto err;
}
- rrpc->poffset = dev->sec_per_lun * lun_begin;
- rrpc->lun_offset = lun_begin;
-
ret = rrpc_core_init(rrpc);
if (ret) {
pr_err("nvm: rrpc: could not initialize core\n");