1 /*
2  * Copyright (C) 2015 IT University of Copenhagen
3  * Initial release: Matias Bjorling <m@bjorling.me>
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version
7  * 2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
15  */
16 
17 #include "rrpc.h"
18 
19 static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
20 static DECLARE_RWSEM(rrpc_lock);
21 
22 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
23 				struct nvm_rq *rqd, unsigned long flags);
24 
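/* Iterate over every LUN instantiated for this target; see for example the
 * LUN selection in rrpc_get_lun_rr() below.
 */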
25 #define rrpc_for_each_lun(rrpc, rlun, i) \
26 		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
27 			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
28 
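/*
 * Mark the sector referenced by @a as invalid in its block's invalid_pages
 * bitmap and clear the corresponding reverse-map entry. The caller must hold
 * rrpc->rev_lock; the block lock is taken internally.
 */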
29 static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
30 {
31 	struct nvm_tgt_dev *dev = rrpc->dev;
32 	struct rrpc_block *rblk = a->rblk;
33 	unsigned int pg_offset;
34 
35 	lockdep_assert_held(&rrpc->rev_lock);
36 
37 	if (a->addr == ADDR_EMPTY || !rblk)
38 		return;
39 
40 	spin_lock(&rblk->lock);
41 
42 	div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
43 	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
44 	rblk->nr_invalid_pages++;
45 
46 	spin_unlock(&rblk->lock);
47 
48 	rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
49 }
50 
51 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
52 							unsigned int len)
53 {
54 	sector_t i;
55 
56 	spin_lock(&rrpc->rev_lock);
57 	for (i = slba; i < slba + len; i++) {
58 		struct rrpc_addr *gp = &rrpc->trans_map[i];
59 
60 		rrpc_page_invalidate(rrpc, gp);
61 		gp->rblk = NULL;
62 	}
63 	spin_unlock(&rrpc->rev_lock);
64 }
65 
66 static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
67 					sector_t laddr, unsigned int pages)
68 {
69 	struct nvm_rq *rqd;
70 	struct rrpc_inflight_rq *inf;
71 
72 	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
73 	if (!rqd)
74 		return ERR_PTR(-ENOMEM);
75 
76 	inf = rrpc_get_inflight_rq(rqd);
77 	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
78 		mempool_free(rqd, rrpc->rq_pool);
79 		return NULL;
80 	}
81 
82 	return rqd;
83 }
84 
85 static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
86 {
87 	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
88 
89 	rrpc_unlock_laddr(rrpc, inf);
90 
91 	mempool_free(rqd, rrpc->rq_pool);
92 }
93 
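/*
 * Handle a discard bio: take the logical address range as an inflight
 * request (busy-waiting until it can be locked), invalidate every sector in
 * the range and release the inflight lock again.
 */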
94 static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
95 {
96 	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
97 	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
98 	struct nvm_rq *rqd;
99 
100 	while (1) {
101 		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
102 		if (rqd)
103 			break;
104 
105 		schedule();
106 	}
107 
108 	if (IS_ERR(rqd)) {
109 		pr_err("rrpc: unable to acquire inflight IO\n");
110 		bio_io_error(bio);
111 		return;
112 	}
113 
114 	rrpc_invalidate_range(rrpc, slba, len);
115 	rrpc_inflight_laddr_release(rrpc, rqd);
116 }
117 
118 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
119 {
120 	struct nvm_tgt_dev *dev = rrpc->dev;
121 
122 	return (rblk->next_page == dev->geo.sec_per_blk);
123 }
124 
125 /* Calculate relative addr for the given block, considering instantiated LUNs */
126 static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
127 {
128 	struct nvm_tgt_dev *dev = rrpc->dev;
129 	struct rrpc_lun *rlun = rblk->rlun;
130 
131 	return rlun->id * dev->geo.sec_per_blk;
132 }
133 
134 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
135 					 struct rrpc_addr *gp)
136 {
137 	struct rrpc_block *rblk = gp->rblk;
138 	struct rrpc_lun *rlun = rblk->rlun;
139 	u64 addr = gp->addr;
140 	struct ppa_addr paddr;
141 
142 	paddr.ppa = addr;
143 	paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
144 	paddr.g.ch = rlun->bppa.g.ch;
145 	paddr.g.lun = rlun->bppa.g.lun;
146 	paddr.g.blk = rblk->id;
147 
148 	return paddr;
149 }
150 
151 /* requires lun->lock taken */
152 static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
153 						struct rrpc_block **cur_rblk)
154 {
155 	struct rrpc *rrpc = rlun->rrpc;
156 
157 	if (*cur_rblk) {
158 		spin_lock(&(*cur_rblk)->lock);
159 		WARN_ON(!block_is_full(rrpc, *cur_rblk));
160 		spin_unlock(&(*cur_rblk)->lock);
161 	}
162 	*cur_rblk = new_rblk;
163 }
164 
165 static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
166 							struct rrpc_lun *rlun)
167 {
168 	struct rrpc_block *rblk = NULL;
169 
170 	if (list_empty(&rlun->free_list))
171 		goto out;
172 
173 	rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
174 
175 	list_move_tail(&rblk->list, &rlun->used_list);
176 	rblk->state = NVM_BLK_ST_TGT;
177 	rlun->nr_free_blocks--;
178 
179 out:
180 	return rblk;
181 }
182 
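/*
 * Allocate a free block from @rlun and reset its runtime state. Non-GC
 * requests are refused once the LUN's free-block count drops below its
 * reserved_blocks watermark, so that garbage collection always has blocks
 * left to write into.
 */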
183 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
184 							unsigned long flags)
185 {
186 	struct nvm_tgt_dev *dev = rrpc->dev;
187 	struct rrpc_block *rblk;
188 	int is_gc = flags & NVM_IOTYPE_GC;
189 
190 	spin_lock(&rlun->lock);
191 	if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
192 		pr_err("nvm: rrpc: cannot give block to non-GC request\n");
193 		spin_unlock(&rlun->lock);
194 		return NULL;
195 	}
196 
197 	rblk = __rrpc_get_blk(rrpc, rlun);
198 	if (!rblk) {
199 		pr_err("nvm: rrpc: cannot get new block\n");
200 		spin_unlock(&rlun->lock);
201 		return NULL;
202 	}
203 	spin_unlock(&rlun->lock);
204 
205 	bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
206 	rblk->next_page = 0;
207 	rblk->nr_invalid_pages = 0;
208 	atomic_set(&rblk->data_cmnt_size, 0);
209 
210 	return rblk;
211 }
212 
213 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
214 {
215 	struct rrpc_lun *rlun = rblk->rlun;
216 
217 	spin_lock(&rlun->lock);
218 	if (rblk->state & NVM_BLK_ST_TGT) {
219 		list_move_tail(&rblk->list, &rlun->free_list);
220 		rlun->nr_free_blocks++;
221 		rblk->state = NVM_BLK_ST_FREE;
222 	} else if (rblk->state & NVM_BLK_ST_BAD) {
223 		list_move_tail(&rblk->list, &rlun->bb_list);
224 		rblk->state = NVM_BLK_ST_BAD;
225 	} else {
226 		WARN_ON_ONCE(1);
227 		pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
228 					rlun->bppa.g.ch, rlun->bppa.g.lun,
229 					rblk->id, rblk->state);
230 		list_move_tail(&rblk->list, &rlun->bb_list);
231 	}
232 	spin_unlock(&rlun->lock);
233 }
234 
235 static void rrpc_put_blks(struct rrpc *rrpc)
236 {
237 	struct rrpc_lun *rlun;
238 	int i;
239 
240 	for (i = 0; i < rrpc->nr_luns; i++) {
241 		rlun = &rrpc->luns[i];
242 		if (rlun->cur)
243 			rrpc_put_blk(rrpc, rlun->cur);
244 		if (rlun->gc_cur)
245 			rrpc_put_blk(rrpc, rlun->gc_cur);
246 	}
247 }
248 
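/* Select the next LUN in simple round-robin order. */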
249 static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
250 {
251 	int next = atomic_inc_return(&rrpc->next_lun);
252 
253 	return &rrpc->luns[next % rrpc->nr_luns];
254 }
255 
256 static void rrpc_gc_kick(struct rrpc *rrpc)
257 {
258 	struct rrpc_lun *rlun;
259 	unsigned int i;
260 
261 	for (i = 0; i < rrpc->nr_luns; i++) {
262 		rlun = &rrpc->luns[i];
263 		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
264 	}
265 }
266 
267 /*
268  * timed GC: kick the per-LUN GC workers at a fixed interval.
269  */
270 static void rrpc_gc_timer(unsigned long data)
271 {
272 	struct rrpc *rrpc = (struct rrpc *)data;
273 
274 	rrpc_gc_kick(rrpc);
275 	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
276 }
277 
278 static void rrpc_end_sync_bio(struct bio *bio)
279 {
280 	struct completion *waiting = bio->bi_private;
281 
282 	if (bio->bi_status)
283 		pr_err("nvm: gc request failed (%u).\n", bio->bi_status);
284 
285 	complete(waiting);
286 }
287 
288 /*
289  * rrpc_move_valid_pages -- migrate live data off the block
290  * @rrpc: the 'rrpc' structure
291  * @block: the block from which to migrate live pages
292  *
293  * Description:
294  *   GC algorithms may call this function to migrate remaining live
295  *   pages off the block prior to erasing it. This function blocks
296  *   further execution until the operation is complete.
297  */
298 static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
299 {
300 	struct nvm_tgt_dev *dev = rrpc->dev;
301 	struct request_queue *q = dev->q;
302 	struct rrpc_rev_addr *rev;
303 	struct nvm_rq *rqd;
304 	struct bio *bio;
305 	struct page *page;
306 	int slot;
307 	int nr_sec_per_blk = dev->geo.sec_per_blk;
308 	u64 phys_addr;
309 	DECLARE_COMPLETION_ONSTACK(wait);
310 
311 	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
312 		return 0;
313 
314 	bio = bio_alloc(GFP_NOIO, 1);
315 	if (!bio) {
316 		pr_err("nvm: could not alloc bio to gc\n");
317 		return -ENOMEM;
318 	}
319 
320 	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
321 
322 	while ((slot = find_first_zero_bit(rblk->invalid_pages,
323 					    nr_sec_per_blk)) < nr_sec_per_blk) {
324 
325 		/* Lock laddr */
326 		phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
327 
328 try:
329 		spin_lock(&rrpc->rev_lock);
330 		/* Get logical address from physical to logical table */
331 		rev = &rrpc->rev_trans_map[phys_addr];
332 		/* already updated by previous regular write */
333 		if (rev->addr == ADDR_EMPTY) {
334 			spin_unlock(&rrpc->rev_lock);
335 			continue;
336 		}
337 
338 		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
339 		if (IS_ERR_OR_NULL(rqd)) {
340 			spin_unlock(&rrpc->rev_lock);
341 			schedule();
342 			goto try;
343 		}
344 
345 		spin_unlock(&rrpc->rev_lock);
346 
347 		/* Perform read to do GC */
348 		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
349 		bio_set_op_attrs(bio,  REQ_OP_READ, 0);
350 		bio->bi_private = &wait;
351 		bio->bi_end_io = rrpc_end_sync_bio;
352 
353 		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
354 		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
355 
356 		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
357 			pr_err("rrpc: gc read failed.\n");
358 			rrpc_inflight_laddr_release(rrpc, rqd);
359 			goto finished;
360 		}
361 		wait_for_completion_io(&wait);
362 		if (bio->bi_status) {
363 			rrpc_inflight_laddr_release(rrpc, rqd);
364 			goto finished;
365 		}
366 
367 		bio_reset(bio);
368 		reinit_completion(&wait);
369 
370 		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
371 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
372 		bio->bi_private = &wait;
373 		bio->bi_end_io = rrpc_end_sync_bio;
374 
375 		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
376 
377 		/* turn the command around and write the data back to a new
378 		 * address
379 		 */
380 		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
381 			pr_err("rrpc: gc write failed.\n");
382 			rrpc_inflight_laddr_release(rrpc, rqd);
383 			goto finished;
384 		}
385 		wait_for_completion_io(&wait);
386 
387 		rrpc_inflight_laddr_release(rrpc, rqd);
388 		if (bio->bi_status)
389 			goto finished;
390 
391 		bio_reset(bio);
392 	}
393 
394 finished:
395 	mempool_free(page, rrpc->page_pool);
396 	bio_put(bio);
397 
398 	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
399 		pr_err("nvm: failed to garbage collect block\n");
400 		return -EIO;
401 	}
402 
403 	return 0;
404 }
405 
406 static void rrpc_block_gc(struct work_struct *work)
407 {
408 	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
409 									ws_gc);
410 	struct rrpc *rrpc = gcb->rrpc;
411 	struct rrpc_block *rblk = gcb->rblk;
412 	struct rrpc_lun *rlun = rblk->rlun;
413 	struct ppa_addr ppa;
414 
415 	mempool_free(gcb, rrpc->gcb_pool);
416 	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
417 			rlun->bppa.g.ch, rlun->bppa.g.lun,
418 			rblk->id);
419 
420 	if (rrpc_move_valid_pages(rrpc, rblk))
421 		goto put_back;
422 
423 	ppa.ppa = 0;
424 	ppa.g.ch = rlun->bppa.g.ch;
425 	ppa.g.lun = rlun->bppa.g.lun;
426 	ppa.g.blk = rblk->id;
427 
428 	if (nvm_erase_sync(rrpc->dev, &ppa, 1))
429 		goto put_back;
430 
431 	rrpc_put_blk(rrpc, rblk);
432 
433 	return;
434 
435 put_back:
436 	spin_lock(&rlun->lock);
437 	list_add_tail(&rblk->prio, &rlun->prio_list);
438 	spin_unlock(&rlun->lock);
439 }
440 
441 /* the block with the highest number of invalid pages will be at the beginning
442  * of the list
443  */
444 static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
445 							struct rrpc_block *rb)
446 {
447 	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
448 		return ra;
449 
450 	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
451 }
452 
453 /* linearly find the block with the highest number of invalid pages;
454  * requires lun->lock to be held
455  */
456 static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
457 {
458 	struct list_head *prio_list = &rlun->prio_list;
459 	struct rrpc_block *rblk, *max;
460 
461 	BUG_ON(list_empty(prio_list));
462 
463 	max = list_first_entry(prio_list, struct rrpc_block, prio);
464 	list_for_each_entry(rblk, prio_list, prio)
465 		max = rblk_max_invalid(max, rblk);
466 
467 	return max;
468 }
469 
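/*
 * Per-LUN GC worker: while the LUN runs low on free blocks, pick the block
 * with the most invalid pages off the prio list and queue it for reclaim on
 * the global GC workqueue.
 */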
470 static void rrpc_lun_gc(struct work_struct *work)
471 {
472 	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
473 	struct rrpc *rrpc = rlun->rrpc;
474 	struct nvm_tgt_dev *dev = rrpc->dev;
475 	struct rrpc_block_gc *gcb;
476 	unsigned int nr_blocks_need;
477 
478 	nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
479 
480 	if (nr_blocks_need < rrpc->nr_luns)
481 		nr_blocks_need = rrpc->nr_luns;
482 
483 	spin_lock(&rlun->lock);
484 	while (nr_blocks_need > rlun->nr_free_blocks &&
485 					!list_empty(&rlun->prio_list)) {
486 		struct rrpc_block *rblk = block_prio_find_max(rlun);
487 
488 		if (!rblk->nr_invalid_pages)
489 			break;
490 
491 		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
492 		if (!gcb)
493 			break;
494 
495 		list_del_init(&rblk->prio);
496 
497 		WARN_ON(!block_is_full(rrpc, rblk));
498 
499 		pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
500 					rlun->bppa.g.ch, rlun->bppa.g.lun,
501 					rblk->id);
502 
503 		gcb->rrpc = rrpc;
504 		gcb->rblk = rblk;
505 		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
506 
507 		queue_work(rrpc->kgc_wq, &gcb->ws_gc);
508 
509 		nr_blocks_need--;
510 	}
511 	spin_unlock(&rlun->lock);
512 
513 	/* TODO: Hint that request queue can be started again */
514 }
515 
516 static void rrpc_gc_queue(struct work_struct *work)
517 {
518 	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
519 									ws_gc);
520 	struct rrpc *rrpc = gcb->rrpc;
521 	struct rrpc_block *rblk = gcb->rblk;
522 	struct rrpc_lun *rlun = rblk->rlun;
523 
524 	spin_lock(&rlun->lock);
525 	list_add_tail(&rblk->prio, &rlun->prio_list);
526 	spin_unlock(&rlun->lock);
527 
528 	mempool_free(gcb, rrpc->gcb_pool);
529 	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
530 					rlun->bppa.g.ch, rlun->bppa.g.lun,
531 					rblk->id);
532 }
533 
534 static const struct block_device_operations rrpc_fops = {
535 	.owner		= THIS_MODULE,
536 };
537 
538 static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
539 {
540 	unsigned int i;
541 	struct rrpc_lun *rlun, *max_free;
542 
543 	if (!is_gc)
544 		return get_next_lun(rrpc);
545 
546 	/* during GC, we don't care about RR; instead we want to make
547 	 * sure that we maintain evenness between the block luns.
548 	 */
549 	max_free = &rrpc->luns[0];
550 	/* prevent a GC-ing lun from devouring pages of a lun with
551 	 * few free blocks. We don't take the lock as we only need an
552 	 * estimate.
553 	 */
554 	rrpc_for_each_lun(rrpc, rlun, i) {
555 		if (rlun->nr_free_blocks > max_free->nr_free_blocks)
556 			max_free = rlun;
557 	}
558 
559 	return max_free;
560 }
561 
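/*
 * Point logical address @laddr at physical address @paddr within @rblk,
 * invalidating any previous mapping, and record the matching reverse
 * (physical-to-logical) entry under rev_lock.
 */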
562 static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
563 					struct rrpc_block *rblk, u64 paddr)
564 {
565 	struct rrpc_addr *gp;
566 	struct rrpc_rev_addr *rev;
567 
568 	BUG_ON(laddr >= rrpc->nr_sects);
569 
570 	gp = &rrpc->trans_map[laddr];
571 	spin_lock(&rrpc->rev_lock);
572 	if (gp->rblk)
573 		rrpc_page_invalidate(rrpc, gp);
574 
575 	gp->addr = paddr;
576 	gp->rblk = rblk;
577 
578 	rev = &rrpc->rev_trans_map[gp->addr];
579 	rev->addr = laddr;
580 	spin_unlock(&rrpc->rev_lock);
581 
582 	return gp;
583 }
584 
585 static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
586 {
587 	u64 addr = ADDR_EMPTY;
588 
589 	spin_lock(&rblk->lock);
590 	if (block_is_full(rrpc, rblk))
591 		goto out;
592 
593 	addr = rblk->next_page;
594 
595 	rblk->next_page++;
596 out:
597 	spin_unlock(&rblk->lock);
598 	return addr;
599 }
600 
601 /* Map logical address to a physical page. The mapping implements a round robin
602  * approach and allocates a page from the next lun available.
603  *
604  * Returns a ppa_addr with the physical address of the mapped page. The
605  * address is set to ADDR_EMPTY if no blocks in the next rlun are available.
606  */
607 static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
608 								int is_gc)
609 {
610 	struct nvm_tgt_dev *tgt_dev = rrpc->dev;
611 	struct rrpc_lun *rlun;
612 	struct rrpc_block *rblk, **cur_rblk;
613 	struct rrpc_addr *p;
614 	struct ppa_addr ppa;
615 	u64 paddr;
616 	int gc_force = 0;
617 
618 	ppa.ppa = ADDR_EMPTY;
619 	rlun = rrpc_get_lun_rr(rrpc, is_gc);
620 
621 	if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
622 		return ppa;
623 
624 	/*
625 	 * page allocation steps:
626 	 * 1. Try to allocate a new page from the current rblk.
627 	 * 2a. If that succeeds, proceed to map it in and return.
628 	 * 2b. If it fails, first try to allocate a new block from the media
629 	 *     manager, and then retry step 1. Retry until the normal block
630 	 *     pool is exhausted.
631 	 * 3. If exhausted, and the garbage collector is requesting the block,
632 	 *    fall back to the reserved block and retry step 1.
633 	 *    If this fails as well, or the request does not come from GC,
634 	 *    report that no block could be retrieved and let the caller
635 	 *    handle further processing.
636 	 */
637 
638 	spin_lock(&rlun->lock);
639 	cur_rblk = &rlun->cur;
640 	rblk = rlun->cur;
641 retry:
642 	paddr = rrpc_alloc_addr(rrpc, rblk);
643 
644 	if (paddr != ADDR_EMPTY)
645 		goto done;
646 
647 	if (!list_empty(&rlun->wblk_list)) {
648 new_blk:
649 		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
650 									prio);
651 		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
652 		list_del(&rblk->prio);
653 		goto retry;
654 	}
655 	spin_unlock(&rlun->lock);
656 
657 	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
658 	if (rblk) {
659 		spin_lock(&rlun->lock);
660 		list_add_tail(&rblk->prio, &rlun->wblk_list);
661 		/*
662 		 * another thread might already have added a new block;
663 		 * therefore, make sure that one is used instead of the
664 		 * one just added.
665 		 */
666 		goto new_blk;
667 	}
668 
669 	if (unlikely(is_gc) && !gc_force) {
670 		/* retry from emergency gc block */
671 		cur_rblk = &rlun->gc_cur;
672 		rblk = rlun->gc_cur;
673 		gc_force = 1;
674 		spin_lock(&rlun->lock);
675 		goto retry;
676 	}
677 
678 	pr_err("rrpc: failed to allocate new block\n");
679 	return ppa;
680 done:
681 	spin_unlock(&rlun->lock);
682 	p = rrpc_update_map(rrpc, laddr, rblk, paddr);
683 	if (!p)
684 		return ppa;
685 
686 	/* return global address */
687 	return rrpc_ppa_to_gaddr(tgt_dev, p);
688 }
689 
690 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
691 {
692 	struct rrpc_block_gc *gcb;
693 
694 	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
695 	if (!gcb) {
696 		pr_err("rrpc: unable to queue block for gc.\n");
697 		return;
698 	}
699 
700 	gcb->rrpc = rrpc;
701 	gcb->rblk = rblk;
702 
703 	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
704 	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
705 }
706 
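/* Find the rrpc_lun that owns the channel/LUN encoded in @p, or NULL. */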
707 static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
708 {
709 	struct rrpc_lun *rlun = NULL;
710 	int i;
711 
712 	for (i = 0; i < rrpc->nr_luns; i++) {
713 		if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
714 				rrpc->luns[i].bppa.g.lun == p.g.lun) {
715 			rlun = &rrpc->luns[i];
716 			break;
717 		}
718 	}
719 
720 	return rlun;
721 }
722 
723 static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
724 {
725 	struct nvm_tgt_dev *dev = rrpc->dev;
726 	struct rrpc_lun *rlun;
727 	struct rrpc_block *rblk;
728 
729 	rlun = rrpc_ppa_to_lun(rrpc, ppa);
730 	rblk = &rlun->blocks[ppa.g.blk];
731 	rblk->state = NVM_BLK_ST_BAD;
732 
733 	nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
734 }
735 
736 static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
737 {
738 	void *comp_bits = &rqd->ppa_status;
739 	struct ppa_addr ppa, prev_ppa;
740 	int nr_ppas = rqd->nr_ppas;
741 	int bit;
742 
743 	if (rqd->nr_ppas == 1)
744 		__rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
745 
746 	ppa_set_empty(&prev_ppa);
747 	bit = -1;
748 	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
749 		ppa = rqd->ppa_list[bit];
750 		if (ppa_cmp_blk(ppa, prev_ppa))
751 			continue;
752 
753 		__rrpc_mark_bad_block(rrpc, ppa);
754 	}
755 }
756 
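/*
 * Write completion: account the committed sectors per block and, once a
 * block has been completely written, queue it so it becomes a candidate for
 * garbage collection.
 */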
757 static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
758 						sector_t laddr, uint8_t npages)
759 {
760 	struct nvm_tgt_dev *dev = rrpc->dev;
761 	struct rrpc_addr *p;
762 	struct rrpc_block *rblk;
763 	int cmnt_size, i;
764 
765 	for (i = 0; i < npages; i++) {
766 		p = &rrpc->trans_map[laddr + i];
767 		rblk = p->rblk;
768 
769 		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
770 		if (unlikely(cmnt_size == dev->geo.sec_per_blk))
771 			rrpc_run_gc(rrpc, rblk);
772 	}
773 }
774 
775 static void rrpc_end_io(struct nvm_rq *rqd)
776 {
777 	struct rrpc *rrpc = rqd->private;
778 	struct nvm_tgt_dev *dev = rrpc->dev;
779 	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
780 	uint8_t npages = rqd->nr_ppas;
781 	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
782 
783 	if (bio_data_dir(rqd->bio) == WRITE) {
784 		if (rqd->error == NVM_RSP_ERR_FAILWRITE)
785 			rrpc_mark_bad_block(rrpc, rqd);
786 
787 		rrpc_end_io_write(rrpc, rrqd, laddr, npages);
788 	}
789 
790 	bio_put(rqd->bio);
791 
792 	if (rrqd->flags & NVM_IOTYPE_GC)
793 		return;
794 
795 	rrpc_unlock_rq(rrpc, rqd);
796 
797 	if (npages > 1)
798 		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
799 
800 	mempool_free(rqd, rrpc->rq_pool);
801 }
802 
803 static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
804 			struct nvm_rq *rqd, unsigned long flags, int npages)
805 {
806 	struct nvm_tgt_dev *dev = rrpc->dev;
807 	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
808 	struct rrpc_addr *gp;
809 	sector_t laddr = rrpc_get_laddr(bio);
810 	int is_gc = flags & NVM_IOTYPE_GC;
811 	int i;
812 
813 	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
814 		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
815 		return NVM_IO_REQUEUE;
816 	}
817 
818 	for (i = 0; i < npages; i++) {
819 		/* We assume that mapping occurs at 4KB granularity */
820 		BUG_ON(!(laddr + i < rrpc->nr_sects));
821 		gp = &rrpc->trans_map[laddr + i];
822 
823 		if (gp->rblk) {
824 			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
825 		} else {
826 			BUG_ON(is_gc);
827 			rrpc_unlock_laddr(rrpc, r);
828 			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
829 							rqd->dma_ppa_list);
830 			return NVM_IO_DONE;
831 		}
832 	}
833 
834 	rqd->opcode = NVM_OP_HBREAD;
835 
836 	return NVM_IO_OK;
837 }
838 
839 static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
840 							unsigned long flags)
841 {
842 	int is_gc = flags & NVM_IOTYPE_GC;
843 	sector_t laddr = rrpc_get_laddr(bio);
844 	struct rrpc_addr *gp;
845 
846 	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
847 		return NVM_IO_REQUEUE;
848 
849 	BUG_ON(!(laddr < rrpc->nr_sects));
850 	gp = &rrpc->trans_map[laddr];
851 
852 	if (gp->rblk) {
853 		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
854 	} else {
855 		BUG_ON(is_gc);
856 		rrpc_unlock_rq(rrpc, rqd);
857 		return NVM_IO_DONE;
858 	}
859 
860 	rqd->opcode = NVM_OP_HBREAD;
861 
862 	return NVM_IO_OK;
863 }
864 
865 static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
866 			struct nvm_rq *rqd, unsigned long flags, int npages)
867 {
868 	struct nvm_tgt_dev *dev = rrpc->dev;
869 	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
870 	struct ppa_addr p;
871 	sector_t laddr = rrpc_get_laddr(bio);
872 	int is_gc = flags & NVM_IOTYPE_GC;
873 	int i;
874 
875 	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
876 		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
877 		return NVM_IO_REQUEUE;
878 	}
879 
880 	for (i = 0; i < npages; i++) {
881 		/* We assume that mapping occurs at 4KB granularity */
882 		p = rrpc_map_page(rrpc, laddr + i, is_gc);
883 		if (p.ppa == ADDR_EMPTY) {
884 			BUG_ON(is_gc);
885 			rrpc_unlock_laddr(rrpc, r);
886 			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
887 							rqd->dma_ppa_list);
888 			rrpc_gc_kick(rrpc);
889 			return NVM_IO_REQUEUE;
890 		}
891 
892 		rqd->ppa_list[i] = p;
893 	}
894 
895 	rqd->opcode = NVM_OP_HBWRITE;
896 
897 	return NVM_IO_OK;
898 }
899 
900 static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
901 				struct nvm_rq *rqd, unsigned long flags)
902 {
903 	struct ppa_addr p;
904 	int is_gc = flags & NVM_IOTYPE_GC;
905 	sector_t laddr = rrpc_get_laddr(bio);
906 
907 	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
908 		return NVM_IO_REQUEUE;
909 
910 	p = rrpc_map_page(rrpc, laddr, is_gc);
911 	if (p.ppa == ADDR_EMPTY) {
912 		BUG_ON(is_gc);
913 		rrpc_unlock_rq(rrpc, rqd);
914 		rrpc_gc_kick(rrpc);
915 		return NVM_IO_REQUEUE;
916 	}
917 
918 	rqd->ppa_addr = p;
919 	rqd->opcode = NVM_OP_HBWRITE;
920 
921 	return NVM_IO_OK;
922 }
923 
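/*
 * Prepare @rqd for submission: allocate a PPA list for multi-page requests
 * and dispatch to the single- or multi-page read/write mapping helpers.
 */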
924 static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
925 			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
926 {
927 	struct nvm_tgt_dev *dev = rrpc->dev;
928 
929 	if (npages > 1) {
930 		rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
931 							&rqd->dma_ppa_list);
932 		if (!rqd->ppa_list) {
933 			pr_err("rrpc: not able to allocate ppa list\n");
934 			return NVM_IO_ERR;
935 		}
936 
937 		if (bio_op(bio) == REQ_OP_WRITE)
938 			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
939 									npages);
940 
941 		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
942 	}
943 
944 	if (bio_op(bio) == REQ_OP_WRITE)
945 		return rrpc_write_rq(rrpc, bio, rqd, flags);
946 
947 	return rrpc_read_rq(rrpc, bio, rqd, flags);
948 }
949 
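/*
 * Validate the bio size, map the request onto device addresses and hand it
 * to the LightNVM core. On submission failure the resources taken in
 * rrpc_setup_rq() are released again for non-GC requests.
 */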
950 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
951 				struct nvm_rq *rqd, unsigned long flags)
952 {
953 	struct nvm_tgt_dev *dev = rrpc->dev;
954 	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
955 	uint8_t nr_pages = rrpc_get_pages(bio);
956 	int bio_size = bio_sectors(bio) << 9;
957 	int err;
958 
959 	if (bio_size < dev->geo.sec_size)
960 		return NVM_IO_ERR;
961 	else if (bio_size > dev->geo.max_rq_size)
962 		return NVM_IO_ERR;
963 
964 	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
965 	if (err)
966 		return err;
967 
968 	bio_get(bio);
969 	rqd->bio = bio;
970 	rqd->private = rrpc;
971 	rqd->nr_ppas = nr_pages;
972 	rqd->end_io = rrpc_end_io;
973 	rrq->flags = flags;
974 
975 	err = nvm_submit_io(dev, rqd);
976 	if (err) {
977 		pr_err("rrpc: I/O submission failed: %d\n", err);
978 		bio_put(bio);
979 		if (!(flags & NVM_IOTYPE_GC)) {
980 			rrpc_unlock_rq(rrpc, rqd);
981 			if (rqd->nr_ppas > 1)
982 				nvm_dev_dma_free(dev->parent, rqd->ppa_list,
983 							rqd->dma_ppa_list);
984 		}
985 		return NVM_IO_ERR;
986 	}
987 
988 	return NVM_IO_OK;
989 }
990 
991 static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
992 {
993 	struct rrpc *rrpc = q->queuedata;
994 	struct nvm_rq *rqd;
995 	int err;
996 
997 	blk_queue_split(q, &bio);
998 
999 	if (bio_op(bio) == REQ_OP_DISCARD) {
1000 		rrpc_discard(rrpc, bio);
1001 		return BLK_QC_T_NONE;
1002 	}
1003 
1004 	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
1005 	memset(rqd, 0, sizeof(struct nvm_rq));
1006 
1007 	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
1008 	switch (err) {
1009 	case NVM_IO_OK:
1010 		return BLK_QC_T_NONE;
1011 	case NVM_IO_ERR:
1012 		bio_io_error(bio);
1013 		break;
1014 	case NVM_IO_DONE:
1015 		bio_endio(bio);
1016 		break;
1017 	case NVM_IO_REQUEUE:
1018 		spin_lock(&rrpc->bio_lock);
1019 		bio_list_add(&rrpc->requeue_bios, bio);
1020 		spin_unlock(&rrpc->bio_lock);
1021 		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
1022 		break;
1023 	}
1024 
1025 	mempool_free(rqd, rrpc->rq_pool);
1026 	return BLK_QC_T_NONE;
1027 }
1028 
1029 static void rrpc_requeue(struct work_struct *work)
1030 {
1031 	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
1032 	struct bio_list bios;
1033 	struct bio *bio;
1034 
1035 	bio_list_init(&bios);
1036 
1037 	spin_lock(&rrpc->bio_lock);
1038 	bio_list_merge(&bios, &rrpc->requeue_bios);
1039 	bio_list_init(&rrpc->requeue_bios);
1040 	spin_unlock(&rrpc->bio_lock);
1041 
1042 	while ((bio = bio_list_pop(&bios)))
1043 		rrpc_make_rq(rrpc->disk->queue, bio);
1044 }
1045 
1046 static void rrpc_gc_free(struct rrpc *rrpc)
1047 {
1048 	if (rrpc->krqd_wq)
1049 		destroy_workqueue(rrpc->krqd_wq);
1050 
1051 	if (rrpc->kgc_wq)
1052 		destroy_workqueue(rrpc->kgc_wq);
1053 }
1054 
1055 static int rrpc_gc_init(struct rrpc *rrpc)
1056 {
1057 	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
1058 								rrpc->nr_luns);
1059 	if (!rrpc->krqd_wq)
1060 		return -ENOMEM;
1061 
1062 	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
1063 	if (!rrpc->kgc_wq)
1064 		return -ENOMEM;
1065 
1066 	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
1067 
1068 	return 0;
1069 }
1070 
1071 static void rrpc_map_free(struct rrpc *rrpc)
1072 {
1073 	vfree(rrpc->rev_trans_map);
1074 	vfree(rrpc->trans_map);
1075 }
1076 
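/*
 * Callback for nvm_get_l2p_tbl(): replay one chunk of the device's L2P table
 * into the target's forward and reverse translation maps and mark the
 * referenced blocks as in use.
 */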
1077 static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
1078 {
1079 	struct rrpc *rrpc = (struct rrpc *)private;
1080 	struct nvm_tgt_dev *dev = rrpc->dev;
1081 	struct rrpc_addr *addr = rrpc->trans_map + slba;
1082 	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
1083 	struct rrpc_lun *rlun;
1084 	struct rrpc_block *rblk;
1085 	u64 i;
1086 
1087 	for (i = 0; i < nlb; i++) {
1088 		struct ppa_addr gaddr;
1089 		u64 pba = le64_to_cpu(entries[i]);
1090 		unsigned int mod;
1091 
1092 		/* LNVM treats address spaces as silos; LBA and PBA are
1093 		 * equally large and zero-indexed.
1094 		 */
1095 		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
1096 			pr_err("nvm: L2P data entry is out of bounds!\n");
1097 			pr_err("nvm: Maybe loaded an old target L2P\n");
1098 			return -EINVAL;
1099 		}
1100 
1101 		/* Address zero is a special one. The first page on a disk is
1102 		 * protected, as it often holds internal device boot
1103 		 * information.
1104 		 */
1105 		if (!pba)
1106 			continue;
1107 
1108 		div_u64_rem(pba, rrpc->nr_sects, &mod);
1109 
1110 		gaddr = rrpc_recov_addr(dev, pba);
1111 		rlun = rrpc_ppa_to_lun(rrpc, gaddr);
1112 		if (!rlun) {
1113 			pr_err("rrpc: l2p corruption on lba %llu\n",
1114 							slba + i);
1115 			return -EINVAL;
1116 		}
1117 
1118 		rblk = &rlun->blocks[gaddr.g.blk];
1119 		if (!rblk->state) {
1120 			/* at this point, we don't know anything about the
1121 			 * block. It's up to the FTL on top to re-establish the
1122 			 * block state. The block is assumed to be open.
1123 			 */
1124 			list_move_tail(&rblk->list, &rlun->used_list);
1125 			rblk->state = NVM_BLK_ST_TGT;
1126 			rlun->nr_free_blocks--;
1127 		}
1128 
1129 		addr[i].addr = pba;
1130 		addr[i].rblk = rblk;
1131 		raddr[mod].addr = slba + i;
1132 	}
1133 
1134 	return 0;
1135 }
1136 
1137 static int rrpc_map_init(struct rrpc *rrpc)
1138 {
1139 	struct nvm_tgt_dev *dev = rrpc->dev;
1140 	sector_t i;
1141 	int ret;
1142 
1143 	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
1144 	if (!rrpc->trans_map)
1145 		return -ENOMEM;
1146 
1147 	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
1148 							* rrpc->nr_sects);
1149 	if (!rrpc->rev_trans_map)
1150 		return -ENOMEM;
1151 
1152 	for (i = 0; i < rrpc->nr_sects; i++) {
1153 		struct rrpc_addr *p = &rrpc->trans_map[i];
1154 		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
1155 
1156 		p->addr = ADDR_EMPTY;
1157 		r->addr = ADDR_EMPTY;
1158 	}
1159 
1160 	/* Bring up the mapping table from device */
1161 	ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
1162 							rrpc_l2p_update, rrpc);
1163 	if (ret) {
1164 		pr_err("nvm: rrpc: could not read L2P table.\n");
1165 		return -EINVAL;
1166 	}
1167 
1168 	return 0;
1169 }
1170 
1171 /* Minimum pages needed within a lun */
1172 #define PAGE_POOL_SIZE 16
1173 #define ADDR_POOL_SIZE 64
1174 
1175 static int rrpc_core_init(struct rrpc *rrpc)
1176 {
1177 	down_write(&rrpc_lock);
1178 	if (!rrpc_gcb_cache) {
1179 		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
1180 				sizeof(struct rrpc_block_gc), 0, 0, NULL);
1181 		if (!rrpc_gcb_cache) {
1182 			up_write(&rrpc_lock);
1183 			return -ENOMEM;
1184 		}
1185 
1186 		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
1187 				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
1188 				0, 0, NULL);
1189 		if (!rrpc_rq_cache) {
1190 			kmem_cache_destroy(rrpc_gcb_cache);
1191 			up_write(&rrpc_lock);
1192 			return -ENOMEM;
1193 		}
1194 	}
1195 	up_write(&rrpc_lock);
1196 
1197 	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
1198 	if (!rrpc->page_pool)
1199 		return -ENOMEM;
1200 
1201 	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
1202 								rrpc_gcb_cache);
1203 	if (!rrpc->gcb_pool)
1204 		return -ENOMEM;
1205 
1206 	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
1207 	if (!rrpc->rq_pool)
1208 		return -ENOMEM;
1209 
1210 	spin_lock_init(&rrpc->inflights.lock);
1211 	INIT_LIST_HEAD(&rrpc->inflights.reqs);
1212 
1213 	return 0;
1214 }
1215 
1216 static void rrpc_core_free(struct rrpc *rrpc)
1217 {
1218 	mempool_destroy(rrpc->page_pool);
1219 	mempool_destroy(rrpc->gcb_pool);
1220 	mempool_destroy(rrpc->rq_pool);
1221 }
1222 
1223 static void rrpc_luns_free(struct rrpc *rrpc)
1224 {
1225 	struct rrpc_lun *rlun;
1226 	int i;
1227 
1228 	if (!rrpc->luns)
1229 		return;
1230 
1231 	for (i = 0; i < rrpc->nr_luns; i++) {
1232 		rlun = &rrpc->luns[i];
1233 		vfree(rlun->blocks);
1234 	}
1235 
1236 	kfree(rrpc->luns);
1237 }
1238 
1239 static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
1240 {
1241 	struct nvm_geo *geo = &dev->geo;
1242 	struct rrpc_block *rblk;
1243 	struct ppa_addr ppa;
1244 	u8 *blks;
1245 	int nr_blks;
1246 	int i;
1247 	int ret;
1248 
1249 	if (!dev->parent->ops->get_bb_tbl)
1250 		return 0;
1251 
1252 	nr_blks = geo->blks_per_lun * geo->plane_mode;
1253 	blks = kmalloc(nr_blks, GFP_KERNEL);
1254 	if (!blks)
1255 		return -ENOMEM;
1256 
1257 	ppa.ppa = 0;
1258 	ppa.g.ch = rlun->bppa.g.ch;
1259 	ppa.g.lun = rlun->bppa.g.lun;
1260 
1261 	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
1262 	if (ret) {
1263 		pr_err("rrpc: could not get BB table\n");
1264 		goto out;
1265 	}
1266 
1267 	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
1268 	if (nr_blks < 0) {
1269 		ret = nr_blks;
1270 		goto out;
1271 	}
1272 
1273 	for (i = 0; i < nr_blks; i++) {
1274 		if (blks[i] == NVM_BLK_T_FREE)
1275 			continue;
1276 
1277 		rblk = &rlun->blocks[i];
1278 		list_move_tail(&rblk->list, &rlun->bb_list);
1279 		rblk->state = NVM_BLK_ST_BAD;
1280 		rlun->nr_free_blocks--;
1281 	}
1282 
1283 out:
1284 	kfree(blks);
1285 	return ret;
1286 }
1287 
1288 static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
1289 {
1290 	rlun->bppa.ppa = 0;
1291 	rlun->bppa.g.ch = ppa.g.ch;
1292 	rlun->bppa.g.lun = ppa.g.lun;
1293 }
1294 
1295 static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
1296 {
1297 	struct nvm_tgt_dev *dev = rrpc->dev;
1298 	struct nvm_geo *geo = &dev->geo;
1299 	struct rrpc_lun *rlun;
1300 	int i, j, ret = -EINVAL;
1301 
1302 	if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
1303 		pr_err("rrpc: number of pages per block too high.\n");
1304 		return -EINVAL;
1305 	}
1306 
1307 	spin_lock_init(&rrpc->rev_lock);
1308 
1309 	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
1310 								GFP_KERNEL);
1311 	if (!rrpc->luns)
1312 		return -ENOMEM;
1313 
1314 	/* 1:1 mapping */
1315 	for (i = 0; i < rrpc->nr_luns; i++) {
1316 		rlun = &rrpc->luns[i];
1317 		rlun->id = i;
1318 		rrpc_set_lun_ppa(rlun, luns[i]);
1319 		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
1320 							geo->blks_per_lun);
1321 		if (!rlun->blocks) {
1322 			ret = -ENOMEM;
1323 			goto err;
1324 		}
1325 
1326 		INIT_LIST_HEAD(&rlun->free_list);
1327 		INIT_LIST_HEAD(&rlun->used_list);
1328 		INIT_LIST_HEAD(&rlun->bb_list);
1329 
1330 		for (j = 0; j < geo->blks_per_lun; j++) {
1331 			struct rrpc_block *rblk = &rlun->blocks[j];
1332 
1333 			rblk->id = j;
1334 			rblk->rlun = rlun;
1335 			rblk->state = NVM_BLK_T_FREE;
1336 			INIT_LIST_HEAD(&rblk->prio);
1337 			INIT_LIST_HEAD(&rblk->list);
1338 			spin_lock_init(&rblk->lock);
1339 
1340 			list_add_tail(&rblk->list, &rlun->free_list);
1341 		}
1342 
1343 		rlun->rrpc = rrpc;
1344 		rlun->nr_free_blocks = geo->blks_per_lun;
1345 		rlun->reserved_blocks = 2; /* for GC only */
1346 
1347 		INIT_LIST_HEAD(&rlun->prio_list);
1348 		INIT_LIST_HEAD(&rlun->wblk_list);
1349 
1350 		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
1351 		spin_lock_init(&rlun->lock);
1352 
1353 		if (rrpc_bb_discovery(dev, rlun))
1354 			goto err;
1355 
1356 	}
1357 
1358 	return 0;
1359 err:
1360 	return ret;
1361 }
1362 
1363 /* returns 0 on success and stores the beginning address in *begin */
1364 static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
1365 {
1366 	struct nvm_tgt_dev *dev = rrpc->dev;
1367 	sector_t size = rrpc->nr_sects * dev->geo.sec_size;
1368 	int ret;
1369 
1370 	size >>= 9;
1371 
1372 	ret = nvm_get_area(dev, begin, size);
1373 	if (!ret)
1374 		*begin >>= (ilog2(dev->geo.sec_size) - 9);
1375 
1376 	return ret;
1377 }
1378 
1379 static void rrpc_area_free(struct rrpc *rrpc)
1380 {
1381 	struct nvm_tgt_dev *dev = rrpc->dev;
1382 	sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
1383 
1384 	nvm_put_area(dev, begin);
1385 }
1386 
1387 static void rrpc_free(struct rrpc *rrpc)
1388 {
1389 	rrpc_gc_free(rrpc);
1390 	rrpc_map_free(rrpc);
1391 	rrpc_core_free(rrpc);
1392 	rrpc_luns_free(rrpc);
1393 	rrpc_area_free(rrpc);
1394 
1395 	kfree(rrpc);
1396 }
1397 
1398 static void rrpc_exit(void *private)
1399 {
1400 	struct rrpc *rrpc = private;
1401 
1402 	del_timer(&rrpc->gc_timer);
1403 
1404 	flush_workqueue(rrpc->krqd_wq);
1405 	flush_workqueue(rrpc->kgc_wq);
1406 
1407 	rrpc_free(rrpc);
1408 }
1409 
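/*
 * Report the capacity exposed to the block layer: subtract the per-LUN
 * reserved blocks and keep roughly 10% of the remaining sectors as
 * over-provisioning headroom for garbage collection.
 */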
1410 static sector_t rrpc_capacity(void *private)
1411 {
1412 	struct rrpc *rrpc = private;
1413 	struct nvm_tgt_dev *dev = rrpc->dev;
1414 	sector_t reserved, provisioned;
1415 
1416 	/* cur, gc, and two emergency blocks for each lun */
1417 	reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
1418 	provisioned = rrpc->nr_sects - reserved;
1419 
1420 	if (reserved > rrpc->nr_sects) {
1421 		pr_err("rrpc: not enough space available to expose storage.\n");
1422 		return 0;
1423 	}
1424 
1425 	sector_div(provisioned, 10);
1426 	return provisioned * 9 * NR_PHY_IN_LOG;
1427 }
1428 
1429 /*
1430  * Looks up the logical address in the reverse trans map and checks whether it
1431  * is still valid by comparing the logical-to-physical address with the
1432  * physical address. Pages whose mapping no longer matches are marked invalid.
1433  */
1434 static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
1435 {
1436 	struct nvm_tgt_dev *dev = rrpc->dev;
1437 	int offset;
1438 	struct rrpc_addr *laddr;
1439 	u64 bpaddr, paddr, pladdr;
1440 
1441 	bpaddr = block_to_rel_addr(rrpc, rblk);
1442 	for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
1443 		paddr = bpaddr + offset;
1444 
1445 		pladdr = rrpc->rev_trans_map[paddr].addr;
1446 		if (pladdr == ADDR_EMPTY)
1447 			continue;
1448 
1449 		laddr = &rrpc->trans_map[pladdr];
1450 
1451 		if (paddr == laddr->addr) {
1452 			laddr->rblk = rblk;
1453 		} else {
1454 			set_bit(offset, rblk->invalid_pages);
1455 			rblk->nr_invalid_pages++;
1456 		}
1457 	}
1458 }
1459 
1460 static int rrpc_blocks_init(struct rrpc *rrpc)
1461 {
1462 	struct nvm_tgt_dev *dev = rrpc->dev;
1463 	struct rrpc_lun *rlun;
1464 	struct rrpc_block *rblk;
1465 	int lun_iter, blk_iter;
1466 
1467 	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
1468 		rlun = &rrpc->luns[lun_iter];
1469 
1470 		for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
1471 								blk_iter++) {
1472 			rblk = &rlun->blocks[blk_iter];
1473 			rrpc_block_map_update(rrpc, rblk);
1474 		}
1475 	}
1476 
1477 	return 0;
1478 }
1479 
1480 static int rrpc_luns_configure(struct rrpc *rrpc)
1481 {
1482 	struct rrpc_lun *rlun;
1483 	struct rrpc_block *rblk;
1484 	int i;
1485 
1486 	for (i = 0; i < rrpc->nr_luns; i++) {
1487 		rlun = &rrpc->luns[i];
1488 
1489 		rblk = rrpc_get_blk(rrpc, rlun, 0);
1490 		if (!rblk)
1491 			goto err;
1492 		rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
1493 
1494 		/* Emergency gc block */
1495 		rblk = rrpc_get_blk(rrpc, rlun, 1);
1496 		if (!rblk)
1497 			goto err;
1498 		rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
1499 	}
1500 
1501 	return 0;
1502 err:
1503 	rrpc_put_blks(rrpc);
1504 	return -EINVAL;
1505 }
1506 
1507 static struct nvm_tgt_type tt_rrpc;
1508 
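/*
 * Target constructor: allocate the rrpc instance, reserve an area on the
 * device, then initialize LUNs, mempools, translation maps, block state and
 * GC before arming the GC timer.
 */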
1509 static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
1510 		       int flags)
1511 {
1512 	struct request_queue *bqueue = dev->q;
1513 	struct request_queue *tqueue = tdisk->queue;
1514 	struct nvm_geo *geo = &dev->geo;
1515 	struct rrpc *rrpc;
1516 	sector_t soffset;
1517 	int ret;
1518 
1519 	if (!(dev->identity.dom & NVM_RSP_L2P)) {
1520 		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
1521 							dev->identity.dom);
1522 		return ERR_PTR(-EINVAL);
1523 	}
1524 
1525 	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
1526 	if (!rrpc)
1527 		return ERR_PTR(-ENOMEM);
1528 
1529 	rrpc->dev = dev;
1530 	rrpc->disk = tdisk;
1531 
1532 	bio_list_init(&rrpc->requeue_bios);
1533 	spin_lock_init(&rrpc->bio_lock);
1534 	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
1535 
1536 	rrpc->nr_luns = geo->nr_luns;
1537 	rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
1538 
1539 	/* simple round-robin strategy */
1540 	atomic_set(&rrpc->next_lun, -1);
1541 
1542 	ret = rrpc_area_init(rrpc, &soffset);
1543 	if (ret < 0) {
1544 		pr_err("nvm: rrpc: could not initialize area\n");
1545 		return ERR_PTR(ret);
1546 	}
1547 	rrpc->soffset = soffset;
1548 
1549 	ret = rrpc_luns_init(rrpc, dev->luns);
1550 	if (ret) {
1551 		pr_err("nvm: rrpc: could not initialize luns\n");
1552 		goto err;
1553 	}
1554 
1555 	ret = rrpc_core_init(rrpc);
1556 	if (ret) {
1557 		pr_err("nvm: rrpc: could not initialize core\n");
1558 		goto err;
1559 	}
1560 
1561 	ret = rrpc_map_init(rrpc);
1562 	if (ret) {
1563 		pr_err("nvm: rrpc: could not initialize maps\n");
1564 		goto err;
1565 	}
1566 
1567 	ret = rrpc_blocks_init(rrpc);
1568 	if (ret) {
1569 		pr_err("nvm: rrpc: could not initialize state for blocks\n");
1570 		goto err;
1571 	}
1572 
1573 	ret = rrpc_luns_configure(rrpc);
1574 	if (ret) {
1575 		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
1576 		goto err;
1577 	}
1578 
1579 	ret = rrpc_gc_init(rrpc);
1580 	if (ret) {
1581 		pr_err("nvm: rrpc: could not initialize gc\n");
1582 		goto err;
1583 	}
1584 
1585 	/* inherit the queue limits from the underlying device */
1586 	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
1587 	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
1588 
1589 	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
1590 			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
1591 
1592 	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
1593 
1594 	return rrpc;
1595 err:
1596 	rrpc_free(rrpc);
1597 	return ERR_PTR(ret);
1598 }
1599 
1600 /* round robin, page-based FTL, and cost-based GC */
1601 static struct nvm_tgt_type tt_rrpc = {
1602 	.name		= "rrpc",
1603 	.version	= {1, 0, 0},
1604 
1605 	.make_rq	= rrpc_make_rq,
1606 	.capacity	= rrpc_capacity,
1607 
1608 	.init		= rrpc_init,
1609 	.exit		= rrpc_exit,
1610 };
1611 
1612 static int __init rrpc_module_init(void)
1613 {
1614 	return nvm_register_tgt_type(&tt_rrpc);
1615 }
1616 
1617 static void rrpc_module_exit(void)
1618 {
1619 	nvm_unregister_tgt_type(&tt_rrpc);
1620 }
1621 
1622 module_init(rrpc_module_init);
1623 module_exit(rrpc_module_exit);
1624 MODULE_LICENSE("GPL v2");
1625 MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");
1626