// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
}

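/*
 * Resolve a sequential read through the L2P table. pblk_lookup_l2p_seq()
 * returns a run of sectors that either all reside in the write buffer or
 * all reside on the device; cached sectors are copied into the bio here,
 * while device-resident ones are left in the ppa list for the device read.
 */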
static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 struct bio *bio, sector_t blba,
				 bool *from_cache)
{
	void *meta_list = rqd->meta_list;
	int nr_secs, i;

retry:
	nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
					from_cache);

	if (!*from_cache)
		goto end;

	for (i = 0; i < nr_secs; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		sector_t lba = blba + i;

		if (pblk_ppa_empty(rqd->ppa_list[i])) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta->lba = addr_empty;
		} else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
			/*
			 * Try to read from write buffer. The address is later
			 * checked against the write buffer to prevent
			 * retrieving overwritten data.
			 */
			if (!pblk_read_from_cache(pblk, bio, lba,
							rqd->ppa_list[i])) {
				if (i == 0) {
					/*
					 * We haven't called bio_advance()
					 * yet, so we can just retry.
					 */
					goto retry;
				} else {
					/*
					 * We have already called
					 * bio_advance(), so we cannot retry.
					 * Quit here so the caller can handle
					 * splitting the bio at the current
					 * sector position.
					 */
					nr_secs = i;
					goto end;
				}
			}
			meta->lba = cpu_to_le64(lba);
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		}
		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

end:
	if (pblk_io_aligned(pblk, nr_secs))
		rqd->is_seq = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif

	return nr_secs;
}


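/*
 * Verify, using the OOB metadata stored next to the data, that every
 * sector of a sequential read carries the LBA that was requested.
 */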
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
				sector_t blba)
{
	void *meta_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		u64 lba = le64_to_cpu(meta->lba);

		if (lba == ADDR_EMPTY)
			continue;

		if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							lba, (u64)blba + i);
			WARN_ON(1);
		}
	}
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
				 u64 *lba_list, int nr_lbas)
{
	void *meta_lba_list = rqd->meta_list;
	int i, j;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0, j = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
							   meta_lba_list, j);
		u64 lba = lba_list[i];
		u64 meta_lba;

		if (lba == ADDR_EMPTY)
			continue;

		meta_lba = le64_to_cpu(meta->lba);

		if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							meta_lba, lba);
			WARN_ON(1);
		}

		j++;
	}

	WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

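/*
 * Complete the original user bio. A high-ECC warning still carries valid
 * data, so it is not surfaced to the caller as an I/O error.
 */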
static void pblk_end_user_read(struct bio *bio, int error)
{
	if (error && error != NVM_RSP_WARN_HIGHECC)
		bio_io_error(bio);
	else
		bio_endio(bio);
}

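/*
 * Common read completion: account the I/O, sanity-check the OOB metadata,
 * drop the internal bio and, if requested, release the line references
 * held by this request.
 */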
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *int_bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);

	pblk_read_check_seq(pblk, rqd, r_ctx->lba);
	bio_put(int_bio);

	if (put_line)
		pblk_rq_to_line_put(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

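/* end_io callback for user reads that were submitted to the device */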
static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = (struct bio *)r_ctx->private;

	pblk_end_user_read(bio, rqd->error);
	__pblk_end_io_read(pblk, rqd, true);
}

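/*
 * Single-sector counterpart of pblk_read_ppalist_rq(): resolve one LBA
 * and either serve it from the write buffer or set up the device read.
 */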
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
			 sector_t lba, bool *from_cache)
{
	struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		meta->lba = addr_empty;
		return;
	}

	/* Try to read from write buffer. The address is later checked
	 * against the write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
			goto retry;
		}

		meta->lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}
}

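/*
 * Entry point for user reads. The original bio is cloned so that cache
 * reads can advance it and device errors can be handled internally; when
 * a request mixes cached and device-resident sectors, it is split and the
 * remainder is resubmitted through the block layer.
 */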
void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct request_queue *q = dev->q;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	bool from_cache;
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	struct bio *int_bio, *split_bio;

	generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
			      &pblk->disk->part0);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = jiffies;
	r_ctx->lba = blba;

	if (pblk_alloc_rqd_meta(pblk, rqd)) {
		bio_io_error(bio);
		pblk_free_rqd(pblk, rqd, PBLK_READ);
		return;
	}

	/* Clone the read bio to deal internally with:
	 * - read errors when reading from the drive
	 * - bio_advance() calls during cache reads
	 */
	int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);

	if (nr_secs > 1)
		nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
						&from_cache);
	else
		pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);

split_retry:
	r_ctx->private = bio; /* original bio */
	rqd->bio = int_bio; /* internal bio */

	if (from_cache && nr_secs == rqd->nr_ppas) {
		/* All data was read from cache, we can complete the IO. */
		pblk_end_user_read(bio, 0);
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
	} else if (nr_secs != rqd->nr_ppas) {
		/* The read bio request could be partially filled by the write
		 * buffer, but there are some holes that need to be read from
		 * the drive. To handle this, use the block layer mechanism to
		 * split this request into smaller ones and chain them.
		 */
		split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
					&pblk_bio_set);
		bio_chain(split_bio, bio);
		generic_make_request(bio);

		/* The new bio contains the first N sectors of the previous
		 * one, so we can keep using the existing rqd, but we need to
		 * shrink the number of PPAs in it. The new bio is also
		 * guaranteed to contain data from either the cache or the
		 * drive, never a mix of the two.
		 */
		bio = split_bio;
		rqd->nr_ppas = nr_secs;
		if (rqd->nr_ppas == 1)
			rqd->ppa_addr = rqd->ppa_list[0];

		/* Recreate int_bio - the existing one might already have had
		 * internal fields modified.
		 */
		bio_put(int_bio);
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		goto split_retry;
	} else if (pblk_submit_io(pblk, rqd, NULL)) {
		/* Submitting the IO to the drive failed; report an error */
		rqd->error = -ENODEV;
		pblk_end_io_read(rqd);
	}
}

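/*
 * Build the ppa list for a multi-sector GC read. A sector is included
 * only if its L2P mapping still points at the line being collected;
 * otherwise it has been overwritten since GC selected it and is dropped
 * from the request.
 */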
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

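/*
 * Single-sector counterpart of read_ppalist_rq_gc(). Returns 1 if the
 * sector is still mapped to the victim line and must be read, 0 otherwise.
 */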
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->capacity) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

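/*
 * Synchronous read path used by the garbage collector: revalidate the
 * victim sectors against the L2P table, read the ones still live into
 * gc_rq->data and verify their LBAs against the OOB metadata.
 */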
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_rq rqd;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	if (gc_rq->nr_secs > 1) {
		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;

	if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
		ret = -EIO;
		goto err_free_dma;
	}

	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;

err_free_dma:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}