/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Matias Bjorling <matias@cnexlabs.com>
 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a Physical Block-device target for Open-channel SSDs.
 *
 */

#ifndef PBLK_H_
#define PBLK_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/uuid.h>

#include <linux/lightnvm.h>

/* Run GC only if less than 1/X of the blocks are free */
#define GC_LIMIT_INVERSE 5
#define GC_TIME_MSECS 1000

#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)
#define PBLK_MAX_REQ_ADDRS (64)
#define PBLK_MAX_REQ_ADDRS_PW (6)

#define PBLK_WS_POOL_SIZE (128)
#define PBLK_META_POOL_SIZE (128)
#define PBLK_READ_REQ_POOL_SIZE (1024)

#define PBLK_NR_CLOSE_JOBS (4)

#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)

#define PBLK_COMMAND_TIMEOUT_MS 30000

/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)

#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)

#define pblk_for_each_lun(pblk, rlun, i) \
		for ((i) = 0, rlun = &(pblk)->luns[0]; \
			(i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
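
/*
 * Example usage of the iterator above (illustrative sketch; the loop body
 * is hypothetical):
 *
 *	struct pblk_lun *rlun;
 *	int i;
 *
 *	pblk_for_each_lun(pblk, rlun, i)
 *		down(&rlun->wr_sem);
 */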

#define ERASE 2 /* READ = 0, WRITE = 1 */

enum {
	/* IO Types */
	PBLK_IOTYPE_USER	= 1 << 0,
	PBLK_IOTYPE_GC		= 1 << 1,

	/* Write buffer flags */
	PBLK_FLUSH_ENTRY	= 1 << 2,
	PBLK_WRITTEN_DATA	= 1 << 3,
	PBLK_SUBMITTED_ENTRY	= 1 << 4,
	PBLK_WRITABLE_ENTRY	= 1 << 5,
};

enum {
	PBLK_BLK_ST_OPEN =	0x1,
	PBLK_BLK_ST_CLOSED =	0x2,
};

struct pblk_sec_meta {
	u64 reserved;
	__le64 lba;
};

/* The number of GC lists and the rate-limiter states go together. This way the
 * rate-limiter can dictate how much GC is needed based on resource utilization.
 */
#define PBLK_GC_NR_LISTS 3

enum {
	PBLK_RL_HIGH = 1,
	PBLK_RL_MID = 2,
	PBLK_RL_LOW = 3,
};

#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
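
/*
 * Worked example (illustrative): struct pblk_sec_meta is 16 bytes, so with
 * PBLK_MAX_REQ_ADDRS = 64 the per-request DMA metadata region is 1KB.
 */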

/* write buffer completion context */
struct pblk_c_ctx {
	struct list_head list;		/* Head for out-of-order completion */

	unsigned long *lun_bitmap;	/* Luns used on current request */
	unsigned int sentry;
	unsigned int nr_valid;
	unsigned int nr_padded;
};

/* generic context */
struct pblk_g_ctx {
	void *private;
};

/* Pad context */
struct pblk_pad_rq {
	struct pblk *pblk;
	struct completion wait;
	struct kref ref;
};

/* Recovery context */
struct pblk_rec_ctx {
	struct pblk *pblk;
	struct nvm_rq *rqd;
	struct list_head failed;
	struct work_struct ws_rec;
};

/* Write context */
struct pblk_w_ctx {
	struct bio_list bios;		/* Original bios - used for completion
					 * in REQ_FUA, REQ_FLUSH case
					 */
	u64 lba;			/* Logical addr. associated with entry */
	struct ppa_addr ppa;		/* Physical addr. associated with entry */
	int flags;			/* Write context flags */
};

struct pblk_rb_entry {
	struct ppa_addr cacheline;	/* Cacheline for this entry */
	void *data;			/* Pointer to data on this entry */
	struct pblk_w_ctx w_ctx;	/* Context for this entry */
	struct list_head index;		/* List head to enable indexes */
};

#define EMPTY_ENTRY (~0U)

struct pblk_rb_pages {
	struct page *pages;
	int order;
	struct list_head list;
};

struct pblk_rb {
	struct pblk_rb_entry *entries;	/* Ring buffer entries */
	unsigned int mem;		/* Write offset - points to next
					 * writable entry in memory
					 */
	unsigned int subm;		/* Read offset - points to last entry
					 * that has been submitted to the media
					 * to be persisted
					 */
	unsigned int sync;		/* Synced - backpointer that signals
					 * the last submitted entry that has
					 * been successfully persisted to media
					 */
	unsigned int sync_point;	/* Sync point - last entry that must be
					 * flushed to the media. Used with
					 * REQ_FLUSH and REQ_FUA
					 */
	unsigned int l2p_update;	/* l2p update point - next entry for
					 * which l2p mapping will be updated to
					 * contain a device ppa address (instead
					 * of a cacheline)
					 */
	unsigned int nr_entries;	/* Number of entries in write buffer -
					 * must be a power of two
					 */
	unsigned int seg_size;		/* Size of the data segments being
					 * stored on each entry. Typically this
					 * will be 4KB
					 */

	struct list_head pages;		/* List of data pages */

	spinlock_t w_lock;		/* Write lock */
	spinlock_t s_lock;		/* Sync lock */

#ifdef CONFIG_NVM_DEBUG
	atomic_t inflight_sync_point;	/* Not served REQ_FLUSH | REQ_FUA */
#endif
};
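
/*
 * Illustrative sketch (not part of the driver): since nr_entries is a
 * power of two, distances between ring pointers can be computed with a
 * mask instead of a modulo. A hypothetical helper for the number of
 * entries written but not yet submitted:
 */
static inline unsigned int pblk_rb_example_unsubmitted(struct pblk_rb *rb)
{
	return (rb->mem - rb->subm) & (rb->nr_entries - 1);
}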

#define PBLK_RECOVERY_SECTORS 16

struct pblk_lun {
	struct ppa_addr bppa;

	u8 *bb_list;			/* Bad block list for LUN. Only used on
					 * bring-up. Bad blocks are managed
					 * within lines at run-time.
					 */

	struct semaphore wr_sem;
};

struct pblk_gc_rq {
	struct pblk_line *line;
	void *data;
	u64 lba_list[PBLK_MAX_REQ_ADDRS];
	int nr_secs;
	int secs_to_gc;
	struct list_head list;
};

struct pblk_gc {
	/* These states are not protected by a lock since (i) they are in the
	 * fast path, and (ii) they are not critical.
	 */
	int gc_active;
	int gc_enabled;
	int gc_forced;

	struct task_struct *gc_ts;
	struct task_struct *gc_writer_ts;
	struct task_struct *gc_reader_ts;

	struct workqueue_struct *gc_line_reader_wq;
	struct workqueue_struct *gc_reader_wq;

	struct timer_list gc_timer;

	struct semaphore gc_sem;
	atomic_t inflight_gc;
	int w_entries;

	struct list_head w_list;
	struct list_head r_list;

	spinlock_t lock;
	spinlock_t w_lock;
	spinlock_t r_lock;
};

struct pblk_rl {
	unsigned int high;	/* Upper threshold for rate limiter (free run -
				 * user I/O rate limiter)
				 */
	unsigned int low;	/* Lower threshold for rate limiter (user I/O
				 * rate limiter - stall)
				 */
	unsigned int high_pw;	/* High rounded up as a power of 2 */

#define PBLK_USER_HIGH_THRS 8	/* Begin write limit at 12.5% available blks */
#define PBLK_USER_LOW_THRS 10	/* Aggressive GC at 10% available blocks */

	int rb_windows_pw;	/* Number of rate windows in the write buffer
				 * given as a power-of-2. This guarantees that
				 * when user I/O is being rate limited, enough
				 * space will be reserved for the GC to place
				 * its payload. A window is of
				 * pblk->max_write_pgs size, which in NVMe is
				 * 64, i.e., 256KB.
				 */
	int rb_budget;		/* Total number of entries available for I/O */
	int rb_user_max;	/* Max buffer entries available for user I/O */
	int rb_gc_max;		/* Max buffer entries available for GC I/O */
	int rb_gc_rsv;		/* Reserved buffer entries for GC I/O */
	int rb_state;		/* Rate-limiter current state */

	atomic_t rb_user_cnt;	/* User I/O buffer counter */
	atomic_t rb_gc_cnt;	/* GC I/O buffer counter */
	atomic_t rb_space;	/* Space limit in case of reaching capacity */

	int rsv_blocks;		/* Reserved blocks for GC */

	int rb_user_active;
	int rb_gc_active;

	struct timer_list u_timer;

	unsigned long long nr_secs;
	unsigned long total_blocks;
	atomic_t free_blocks;
};
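
/*
 * Illustrative sketch (assumption, not driver code): one possible mapping
 * from the free block count to the PBLK_RL_* states declared above.
 */
static inline int pblk_rl_example_state(struct pblk_rl *rl)
{
	int free_blocks = atomic_read(&rl->free_blocks);

	if (free_blocks >= (int)rl->high)
		return PBLK_RL_HIGH;
	if (free_blocks >= (int)rl->low)
		return PBLK_RL_MID;
	return PBLK_RL_LOW;
}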

#define PBLK_LINE_EMPTY (~0U)

enum {
	/* Line Types */
	PBLK_LINETYPE_FREE = 0,
	PBLK_LINETYPE_LOG = 1,
	PBLK_LINETYPE_DATA = 2,

	/* Line state */
	PBLK_LINESTATE_FREE = 10,
	PBLK_LINESTATE_OPEN = 11,
	PBLK_LINESTATE_CLOSED = 12,
	PBLK_LINESTATE_GC = 13,
	PBLK_LINESTATE_BAD = 14,
	PBLK_LINESTATE_CORRUPT = 15,

	/* GC group */
	PBLK_LINEGC_NONE = 20,
	PBLK_LINEGC_EMPTY = 21,
	PBLK_LINEGC_LOW = 22,
	PBLK_LINEGC_MID = 23,
	PBLK_LINEGC_HIGH = 24,
	PBLK_LINEGC_FULL = 25,
};

#define PBLK_MAGIC 0x70626c6b /* pblk */

struct line_header {
	__le32 crc;
	__le32 identifier;	/* pblk identifier */
	__u8 uuid[16];		/* instance uuid */
	__le16 type;		/* line type */
	__le16 version;		/* type version */
	__le32 id;		/* line id for current line */
};

struct line_smeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */
	/* Previous line metadata */
	__le32 prev_id;		/* Line id for previous line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	__le32 rsvd[2];

	__le64 lun_bitmap[];
};

/*
 * Metadata layout in media:
 *	First sector:
 *		1. struct line_emeta
 *		2. bad block bitmap (u64 * window_wr_lun)
 *	Mid sectors (start at lbas_sector):
 *		3. nr_lbas (u64) forming lba list
 *	Last sectors (start at vsc_sector):
 *		4. u32 valid sector count (vsc) for all lines (~0U: free line)
 */
struct line_emeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */

	/* Previous line metadata */
	__le32 prev_id;		/* Line id for prev line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	/* Bookkeeping for recovery */
	__le32 next_id;		/* Line id for next line */
	__le64 nr_lbas;		/* Number of lbas mapped in line */
	__le64 nr_valid_lbas;	/* Number of valid lbas mapped in line */
	__le64 bb_bitmap[];	/* Updated bad block bitmap for line */
};

struct pblk_emeta {
	struct line_emeta *buf;		/* emeta buffer in media format */
	int mem;			/* Write offset - points to next
					 * writable entry in memory
					 */
	atomic_t sync;			/* Synced - backpointer that signals the
					 * last entry that has been successfully
					 * persisted to media
					 */
	unsigned int nr_entries;	/* Number of emeta entries */
};

struct pblk_smeta {
	struct line_smeta *buf;		/* smeta buffer in persistent format */
};

struct pblk_line {
	struct pblk *pblk;
	unsigned int id;		/* Line number corresponds to the
					 * block line
					 */
	unsigned int seq_nr;		/* Unique line sequence number */

	int state;			/* PBLK_LINESTATE_X */
	int type;			/* PBLK_LINETYPE_X */
	int gc_group;			/* PBLK_LINEGC_X */
	struct list_head list;		/* Free, GC lists */

	unsigned long *lun_bitmap;	/* Bitmap for LUNs mapped in line */

	struct pblk_smeta *smeta;	/* Start metadata */
	struct pblk_emeta *emeta;	/* End metadata */

	int meta_line;			/* Metadata line id */
	int meta_distance;		/* Distance between data and metadata */

	u64 smeta_ssec;			/* Sector where smeta starts */
	u64 emeta_ssec;			/* Sector where emeta starts */

	unsigned int sec_in_line;	/* Number of usable secs in line */

	atomic_t blk_in_line;		/* Number of good blocks in line */
	unsigned long *blk_bitmap;	/* Bitmap for valid/invalid blocks */
	unsigned long *erase_bitmap;	/* Bitmap for erased blocks */

	unsigned long *map_bitmap;	/* Bitmap for mapped sectors in line */
	unsigned long *invalid_bitmap;	/* Bitmap for invalid sectors in line */

	atomic_t left_eblks;		/* Blocks left for erasing */
	atomic_t left_seblks;		/* Blocks left for sync erasing */

	int left_msecs;			/* Sectors left for mapping */
	unsigned int cur_sec;		/* Sector map pointer */
	unsigned int nr_valid_lbas;	/* Number of valid lbas in line */

	__le32 *vsc;			/* Valid sector count in line */

	struct kref ref;		/* Write buffer L2P references */

	spinlock_t lock;		/* Necessary for invalid_bitmap only */
};

#define PBLK_DATA_LINES 4

enum {
	PBLK_KMALLOC_META = 1,
	PBLK_VMALLOC_META = 2,
};

enum {
	PBLK_EMETA_TYPE_HEADER = 1,	/* struct line_emeta first sector */
	PBLK_EMETA_TYPE_LLBA = 2,	/* lba list - type: __le64 */
	PBLK_EMETA_TYPE_VSC = 3,	/* vsc list - type: __le32 */
};

struct pblk_line_mgmt {
	int nr_lines;			/* Total number of full lines */
	int nr_free_lines;		/* Number of full lines in free list */

	/* Free lists - use free_lock */
	struct list_head free_list;	/* Full lines ready to use */
	struct list_head corrupt_list;	/* Full lines corrupted */
	struct list_head bad_list;	/* Full lines bad */

	/* GC lists - use gc_lock */
	struct list_head *gc_lists[PBLK_GC_NR_LISTS];
	struct list_head gc_high_list;	/* Full lines ready to GC, high isc */
	struct list_head gc_mid_list;	/* Full lines ready to GC, mid isc */
	struct list_head gc_low_list;	/* Full lines ready to GC, low isc */

	struct list_head gc_full_list;	/* Full lines ready to GC, no valid */
	struct list_head gc_empty_list;	/* Full lines closed, all valid */

	struct pblk_line *log_line;	/* Current FTL log line */
	struct pblk_line *data_line;	/* Current data line */
	struct pblk_line *log_next;	/* Next FTL log line */
	struct pblk_line *data_next;	/* Next data line */

	struct list_head emeta_list;	/* Lines queued to schedule emeta */

	__le32 *vsc_list;		/* Valid sector counts for all lines */

	/* Metadata allocation type: VMALLOC | KMALLOC */
	int emeta_alloc_type;

	/* Pre-allocated metadata for data lines */
	struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
	struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
	unsigned long meta_bitmap;

	/* Helpers for fast bitmap calculations */
	unsigned long *bb_template;
	unsigned long *bb_aux;

	unsigned long d_seq_nr;		/* Data line unique sequence number */
	unsigned long l_seq_nr;		/* Log line unique sequence number */

	spinlock_t free_lock;
	spinlock_t close_lock;
	spinlock_t gc_lock;
};

struct pblk_line_meta {
	unsigned int smeta_len;		/* Total length for smeta */
	unsigned int smeta_sec;		/* Sectors needed for smeta */

	unsigned int emeta_len[4];	/* Lengths for emeta:
					 *  [0]: Total length
					 *  [1]: struct line_emeta length
					 *  [2]: L2P portion length
					 *  [3]: vsc list length
					 */
	unsigned int emeta_sec[4];	/* Sectors needed for emeta. Same layout
					 * as emeta_len
					 */

	unsigned int emeta_bb;		/* Boundary for bb that affects emeta */

	unsigned int vsc_list_len;	/* Length for vsc list */
	unsigned int sec_bitmap_len;	/* Length for sector bitmap in line */
	unsigned int blk_bitmap_len;	/* Length for block bitmap in line */
	unsigned int lun_bitmap_len;	/* Length for lun bitmap in line */

	unsigned int blk_per_line;	/* Number of blocks in a full line */
	unsigned int sec_per_line;	/* Number of sectors in a line */
	unsigned int dsec_per_line;	/* Number of data sectors in a line */
	unsigned int min_blk_line;	/* Min. number of good blocks in line */

	unsigned int mid_thrs;		/* Threshold for GC mid list */
	unsigned int high_thrs;		/* Threshold for GC high list */

	unsigned int meta_distance;	/* Distance between data and metadata */
};

struct pblk_addr_format {
	u64	ch_mask;
	u64	lun_mask;
	u64	pln_mask;
	u64	blk_mask;
	u64	pg_mask;
	u64	sec_mask;
	u8	ch_offset;
	u8	lun_offset;
	u8	pln_offset;
	u8	blk_offset;
	u8	pg_offset;
	u8	sec_offset;
};

enum {
	PBLK_STATE_RUNNING = 0,
	PBLK_STATE_STOPPING = 1,
	PBLK_STATE_RECOVERING = 2,
	PBLK_STATE_STOPPED = 3,
};

struct pblk {
	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	struct kobject kobj;

	struct pblk_lun *luns;

	struct pblk_line *lines;		/* Line array */
	struct pblk_line_mgmt l_mg;		/* Line management */
	struct pblk_line_meta lm;		/* Line metadata */

	int ppaf_bitsize;
	struct pblk_addr_format ppaf;

	struct pblk_rb rwb;

	int state;			/* pblk state (PBLK_STATE_*) */

	int min_write_pgs; /* Minimum number of pages required by controller */
	int max_write_pgs; /* Maximum number of pages supported by controller */
	int pgs_in_buffer; /* Number of pages that need to be held in buffer to
			    * guarantee successful reads.
			    */

	sector_t capacity; /* Device capacity when bad blocks are subtracted */
	int over_pct;      /* Percentage of device used for over-provisioning */

	/* pblk provisioning values. Used by rate limiter */
	struct pblk_rl rl;

	int sec_per_write;

	unsigned char instance_uuid[16];
#ifdef CONFIG_NVM_DEBUG
	/* All debug counters apply to 4kb sector I/Os */
	atomic_long_t inflight_writes;	/* Inflight writes (user and gc) */
	atomic_long_t padded_writes;	/* Sectors padded due to flush/fua */
	atomic_long_t padded_wb;	/* Sectors padded in write buffer */
	atomic_long_t nr_flush;		/* Number of flush/fua I/O */
	atomic_long_t req_writes;	/* Sectors stored on write buffer */
	atomic_long_t sub_writes;	/* Sectors submitted from buffer */
	atomic_long_t sync_writes;	/* Sectors synced to media */
	atomic_long_t inflight_reads;	/* Inflight sector read requests */
	atomic_long_t cache_reads;	/* Read requests that hit the cache */
	atomic_long_t sync_reads;	/* Completed sector read requests */
	atomic_long_t recov_writes;	/* Sectors submitted from recovery */
	atomic_long_t recov_gc_writes;	/* Sectors submitted from write GC */
	atomic_long_t recov_gc_reads;	/* Sectors submitted from read GC */
#endif

	spinlock_t lock;

	atomic_long_t read_failed;
	atomic_long_t read_empty;
	atomic_long_t read_high_ecc;
	atomic_long_t read_failed_gc;
	atomic_long_t write_failed;
	atomic_long_t erase_failed;

	atomic_t inflight_io;		/* General inflight I/O counter */

	struct task_struct *writer_ts;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	unsigned char *trans_map;
	spinlock_t trans_lock;

	struct list_head compl_list;

	mempool_t *page_bio_pool;
	mempool_t *line_ws_pool;
	mempool_t *rec_pool;
	mempool_t *g_rq_pool;
	mempool_t *w_rq_pool;
	mempool_t *line_meta_pool;

	struct workqueue_struct *close_wq;
	struct workqueue_struct *bb_wq;

	struct timer_list wtimer;

	struct pblk_gc gc;
};

struct pblk_line_ws {
	struct pblk *pblk;
	struct pblk_line *line;
	void *priv;
	struct work_struct ws;
};

#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))

/*
 * pblk ring buffer operations
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
		 unsigned int power_size, unsigned int power_seg_sz);
unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
void *pblk_rb_entries_ref(struct pblk_rb *rb);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos);
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
			    struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
			    unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
void pblk_rb_flush(struct pblk_rb *rb);

void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
				 struct bio *bio, unsigned int pos,
				 unsigned int nr_entries, unsigned int count);
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
				      struct list_head *list,
				      unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
			struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
					      struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);

unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);

int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
void pblk_rb_data_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);

/*
 * pblk core
 */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw);
void pblk_wait_for_meta(struct pblk *pblk);
struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba);
void pblk_discard(struct pblk *pblk, struct bio *bio);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
void pblk_line_replace_data(struct pblk *pblk);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta_sync(struct pblk *pblk);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
void pblk_line_mark_bb(struct work_struct *work);
void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *),
		      struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush);
void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap);
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap);
void pblk_end_bio_sync(struct bio *bio);
void pblk_end_io_sync(struct nvm_rq *rqd);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages);
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr);
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
			   struct ppa_addr ppa);
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs);

/*
 * pblk user I/O write path
 */
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
			unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
			   unsigned int nr_entries, unsigned int nr_rec_entries,
			   struct pblk_line *gc_line, unsigned long flags);

/*
 * pblk map
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa);
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off);

/*
 * pblk write thread
 */
int pblk_write_ts(void *data);
void pblk_write_timer_fn(unsigned long data);
void pblk_write_should_kick(struct pblk *pblk);

/*
 * pblk read path
 */
extern struct bio_set *pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
			unsigned int nr_secs, unsigned int *secs_to_gc,
			struct pblk_line *line);
/*
 * pblk recovery
 */
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp);

/*
 * pblk gc
 */
#define PBLK_GC_MAX_READERS 8	/* Max number of outstanding GC reader jobs */
#define PBLK_GC_W_QD 128	/* Queue depth for inflight GC write I/Os */
#define PBLK_GC_L_QD 4		/* Queue depth for inflight GC lines */
#define PBLK_GC_RSV_LINE 1	/* Reserved lines for GC */

int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
void pblk_gc_kick(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);

/*
 * pblk rate limiter
 */
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
int pblk_rl_low_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_sysfs_rate_show(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_set_space_limit(struct pblk_rl *rl, int entries_left);
int pblk_rl_is_limit(struct pblk_rl *rl);

/*
 * pblk sysfs
 */
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);

static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
{
	if (type == PBLK_KMALLOC_META)
		return kmalloc(size, flags);
	return vmalloc(size);
}

static inline void pblk_mfree(void *ptr, int type)
{
	if (type == PBLK_KMALLOC_META)
		kfree(ptr);
	else
		vfree(ptr);
}
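
/*
 * Example usage (illustrative sketch; the helper name is hypothetical):
 * allocate an emeta buffer with the allocation type chosen at init time.
 * It must later be freed with pblk_mfree() using the same type.
 */
static inline void *pblk_example_alloc_emeta_buf(struct pblk *pblk,
						 gfp_t flags)
{
	return pblk_malloc(pblk->lm.emeta_len[0],
			   pblk->l_mg.emeta_alloc_type, flags);
}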

static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
	return c_ctx - sizeof(struct nvm_rq);
}

static inline void *emeta_to_bb(struct line_emeta *emeta)
{
	return emeta->bb_bitmap;
}

static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
{
	return ((void *)emeta + pblk->lm.emeta_len[1]);
}

static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
{
	return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
}

static inline int pblk_line_vsc(struct pblk_line *line)
{
	int vsc;

	spin_lock(&line->lock);
	vsc = le32_to_cpu(*line->vsc);
	spin_unlock(&line->lock);

	return vsc;
}

#define NVM_MEM_PAGE_WRITE (8)

static inline int pblk_pad_distance(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
}

static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}

static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}

static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}

/* A block within a line corresponds to the lun */
static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}
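
/*
 * Worked example (illustrative): with a geometry of nr_chnls = 16, a ppa
 * with lun = 2 and ch = 3 maps to pos 2 * 16 + 3 = 35.
 */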

static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
	struct ppa_addr ppa64;

	ppa64.ppa = 0;

	if (ppa32 == -1) {
		ppa64.ppa = ADDR_EMPTY;
	} else if (ppa32 & (1U << 31)) {
		ppa64.c.line = ppa32 & ((~0U) >> 1);
		ppa64.c.is_cached = 1;
	} else {
		ppa64.g.blk = (ppa32 & pblk->ppaf.blk_mask) >>
							pblk->ppaf.blk_offset;
		ppa64.g.pg = (ppa32 & pblk->ppaf.pg_mask) >>
							pblk->ppaf.pg_offset;
		ppa64.g.lun = (ppa32 & pblk->ppaf.lun_mask) >>
							pblk->ppaf.lun_offset;
		ppa64.g.ch = (ppa32 & pblk->ppaf.ch_mask) >>
							pblk->ppaf.ch_offset;
		ppa64.g.pl = (ppa32 & pblk->ppaf.pln_mask) >>
							pblk->ppaf.pln_offset;
		ppa64.g.sec = (ppa32 & pblk->ppaf.sec_mask) >>
							pblk->ppaf.sec_offset;
	}

	return ppa64;
}

static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
								sector_t lba)
{
	struct ppa_addr ppa;

	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
	} else {
		struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;

		ppa = map[lba];
	}

	return ppa;
}

static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
	u32 ppa32 = 0;

	if (ppa64.ppa == ADDR_EMPTY) {
		ppa32 = ~0U;
	} else if (ppa64.c.is_cached) {
		ppa32 |= ppa64.c.line;
		ppa32 |= 1U << 31;
	} else {
		ppa32 |= ppa64.g.blk << pblk->ppaf.blk_offset;
		ppa32 |= ppa64.g.pg << pblk->ppaf.pg_offset;
		ppa32 |= ppa64.g.lun << pblk->ppaf.lun_offset;
		ppa32 |= ppa64.g.ch << pblk->ppaf.ch_offset;
		ppa32 |= ppa64.g.pl << pblk->ppaf.pln_offset;
		ppa32 |= ppa64.g.sec << pblk->ppaf.sec_offset;
	}

	return ppa32;
}
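
/*
 * Illustrative sketch (assumption): when the device address format fits in
 * 32 bits, the two conversions above are inverses of each other. A
 * hypothetical self-check:
 */
static inline int pblk_example_ppa32_roundtrip(struct pblk *pblk, u32 ppa32)
{
	return pblk_ppa64_to_ppa32(pblk, pblk_ppa32_to_ppa64(pblk, ppa32)) ==
									ppa32;
}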

static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
						struct ppa_addr ppa)
{
	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
	} else {
		u64 *map = (u64 *)pblk->trans_map;

		map[lba] = ppa.ppa;
	}
}
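
/*
 * Illustrative sketch (assumption): callers are expected to serialize map
 * accesses, e.g. with pblk->trans_lock. The helper name is hypothetical.
 */
static inline void pblk_example_map_update(struct pblk *pblk, sector_t lba,
					   struct ppa_addr ppa)
{
	spin_lock(&pblk->trans_lock);
	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}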

static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
							struct ppa_addr p)
{
	u64 paddr;

	paddr = 0;
	paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
	paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
	paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
	paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
	paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;

	return paddr;
}

static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
	if (lppa.ppa == rppa.ppa)
		return true;

	return false;
}

static inline int pblk_addr_in_cache(struct ppa_addr ppa)
{
	return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
}

static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
{
	return ppa.c.line;
}

static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
{
	struct ppa_addr p;

	p.c.line = addr;
	p.c.is_cached = 1;

	return p;
}

static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
					      u64 line_id)
{
	struct ppa_addr ppa;

	ppa.ppa = 0;
	ppa.g.blk = line_id;
	ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
	ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
	ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
	ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
	ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;

	return ppa;
}

static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
					 u64 line_id)
{
	struct ppa_addr ppa;

	ppa = addr_to_gen_ppa(pblk, paddr, line_id);

	return ppa;
}

static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
					    struct line_header *header)
{
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}
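
/*
 * Illustrative sketch (assumption): a recovery path can validate a line
 * header read from media by recomputing its CRC. The helper name is
 * hypothetical.
 */
static inline int pblk_example_header_crc_ok(struct pblk *pblk,
					     struct line_header *header)
{
	return le32_to_cpu(header->crc) ==
				pblk_calc_meta_header_crc(pblk, header);
}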

static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
				      struct line_smeta *smeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)smeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->smeta_len -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
				      struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)emeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->emeta_len[0] -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = geo->plane_mode >> 1;

	if (type == WRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

enum {
	PBLK_READ_RANDOM	= 0,
	PBLK_READ_SEQUENTIAL	= 1,
};

static inline int pblk_set_read_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
	if (type == PBLK_READ_SEQUENTIAL)
		flags |= geo->plane_mode >> 1;

	return flags;
}

static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
	return !(nr_secs % pblk->min_write_pgs);
}

#ifdef CONFIG_NVM_DEBUG
static inline void print_ppa(struct ppa_addr *p, char *msg, int error)
{
	if (p->c.is_cached) {
		pr_err("ppa: (%s: %x) cache line: %llu\n",
				msg, error, (u64)p->c.line);
	} else {
		pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
			msg, error,
			p->g.ch, p->g.lun, p->g.blk,
			p->g.pg, p->g.pl, p->g.sec);
	}
}

static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
					 int error)
{
	int bit = -1;

	if (rqd->nr_ppas == 1) {
		print_ppa(&rqd->ppa_addr, "rqd", error);
		return;
	}

	while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
						bit + 1)) < rqd->nr_ppas) {
		print_ppa(&rqd->ppa_list[bit], "rqd", error);
	}

	pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}
#endif

static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
				       struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct ppa_addr *ppa;
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa = &ppas[i];

		if (!ppa->c.is_cached &&
				ppa->g.ch < geo->nr_chnls &&
				ppa->g.lun < geo->luns_per_chnl &&
				ppa->g.pl < geo->nr_planes &&
				ppa->g.blk < geo->blks_per_lun &&
				ppa->g.pg < geo->pgs_per_blk &&
				ppa->g.sec < geo->sec_per_pg)
			continue;

#ifdef CONFIG_NVM_DEBUG
		print_ppa(ppa, "boundary", i);
#endif
		return 1;
	}
	return 0;
}

static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
	struct pblk_line_meta *lm = &pblk->lm;

	if (paddr > lm->sec_per_line)
		return 1;

	return 0;
}

static inline unsigned int pblk_get_bi_idx(struct bio *bio)
{
	return bio->bi_iter.bi_idx;
}

static inline sector_t pblk_get_lba(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int pblk_get_secs(struct bio *bio)
{
	return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}

static inline sector_t pblk_get_sector(sector_t lba)
{
	return lba * NR_PHY_IN_LOG;
}
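
/*
 * Worked example (illustrative): with 512B device sectors and a 4KB
 * exposed page, NR_PHY_IN_LOG is 8, so bio sector 4096 maps to lba 512
 * and pblk_get_sector(512) maps back to sector 4096.
 */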

static inline void pblk_setup_uuid(struct pblk *pblk)
{
	uuid_le uuid;

	uuid_le_gen(&uuid);
	memcpy(pblk->instance_uuid, uuid.b, 16);
}
#endif /* PBLK_H_ */