// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
#include "pblk-trace.h"

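/*
 * Complete a user write on the buffer side: end the original bios attached
 * to each synced entry, release any pending flush points, free the pages
 * used for padding and advance the sync pointer of the ring buffer.
 * Returns the new sync position.
 */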
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}

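/*
 * Complete a write that was queued on the completion list because it
 * finished out of order, now that its turn in the buffer has come.
 */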
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

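/*
 * Writes must be released from the write buffer in submission order. If this
 * request is next in line, complete it together with any queued completions
 * it unblocks; otherwise park it on the completion list.
 */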
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
	pblk_up_rq(pblk, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
		int rqd_ppas)
{
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
	__le64 *lba_list;
	u64 paddr;
	int done = 0;
	int n = 0;

	line = pblk_ppa_to_line(pblk, *ppa);
	lba_list = emeta_to_lbas(pblk, line->emeta->buf);

	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (n < rqd_ppas && lba_list[paddr] != addr_empty)
			line->nr_valid_lbas--;

		lba_list[paddr] = addr_empty;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);

		n++;
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}

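/*
 * Make the failed entries in the write buffer submittable again: drop L2P
 * mappings that have since been overwritten and release the line references
 * taken when the entries were first mapped.
 */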
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		if (w_ctx->lba != ADDR_EMPTY) {
			ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
			if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
				w_ctx->lba = ADDR_EMPTY;
		}

		/* Mark the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count to the line as we will
		 * re-map these entries
		 */
		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		atomic_dec(&line->sec_to_update);
		kref_put(&line->ref, pblk_line_put);
	}
	spin_unlock(&pblk->trans_lock);
}

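/*
 * Queue the sectors of a failed write so that the write thread resubmits
 * them before taking in new data.
 */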
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

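/*
 * Recovery work for a failed write: log the error, invalidate the remaining
 * sectors of the chunk, queue the entries for resubmission and release the
 * resources held by the original request.
 */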
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	pblk_log_write_err(pblk, rqd);

	pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
	pblk_write_kick(pblk);
}

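/*
 * Write error handling runs from the end I/O path, so defer the actual
 * recovery to a workqueue.
 */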
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}

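/* Completion path for user data writes */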
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	}

	if (trace_pblk_chunk_state_enabled())
		pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

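/*
 * Completion path for emeta writes. The line is closed once all of its
 * emeta has been synced to the media.
 */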
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int sync;

	pblk_up_chunk(pblk, ppa_list[0]);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}

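/* Set up the common fields of a write request and allocate its metadata */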
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->is_seq = 1;
	rqd->private = pblk;
	rqd->end_io = end_io;

	return pblk_alloc_rqd_meta(pblk, rqd);
}

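/*
 * Map the valid entries of a write request to physical addresses on the
 * current data line, scheduling an erase on the next line if one is needed.
 */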
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, 0);
	else
		ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return ret;
}

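/*
 * Calculate how many sectors to sync to the media, given the sectors
 * available in the write buffer and any outstanding flush requests.
 */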
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

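/* Write out the next portion of emeta for @meta_line */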
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct ppa_addr *ppa_list;
	struct pblk_g_ctx *m_ctx;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_rqd;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd, data);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_chunk(pblk, ppa_list[0]);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* Put the line back on the emeta list if it was removed above */
	if (emeta->mem >= lm->emeta_len[0])
		list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so that it is no longer optimal,
	 * but moves the optimal point in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}

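/*
 * Return the line whose emeta is due for writing alongside this data
 * request, or NULL if there is no candidate or scheduling it now would
 * collide with the data I/O.
 */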
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}

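/*
 * Submit a data write for the current line together with, when applicable,
 * an erase for the next line and an emeta write for a previous line.
 */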
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd, NULL);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d\n",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

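/* Free the pages that were added to pad the write bio */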
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}

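/*
 * Form a write request from the entries in the write buffer, resubmitting
 * failed writes first, and send it to the media. On success, *secs_left is
 * non-zero if data was submitted, telling the caller to keep iterating.
 */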
static int pblk_submit_write(struct pblk *pblk, int *secs_left)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush, packed_meta_pgs;
	unsigned long pos;
	unsigned int resubmit;

	*secs_left = 0;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be cleared by
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 0;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
			return 0;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 0;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
	bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	*secs_left = 1;
	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return -EINTR;
}

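/*
 * Write thread: drains the write buffer until the thread is stopped,
 * sleeping whenever there is nothing to submit. After a submission failure,
 * no new writes are issued.
 */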
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;
	int secs_left;
	int write_failure = 0;

	while (!kthread_should_stop()) {
		if (!write_failure) {
			write_failure = pblk_submit_write(pblk, &secs_left);

			if (secs_left)
				continue;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}