1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __BLK_NULL_BLK_H
3 #define __BLK_NULL_BLK_H
4
5 #undef pr_fmt
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/blkdev.h>
9 #include <linux/slab.h>
10 #include <linux/blk-mq.h>
11 #include <linux/hrtimer.h>
12 #include <linux/configfs.h>
13 #include <linux/badblocks.h>
14 #include <linux/fault-inject.h>
15 #include <linux/spinlock.h>
16 #include <linux/mutex.h>
17
/*
 * Per-command state. Depending on the queue mode, a command is carried
 * either by a blk-mq request (rq) or by a bio (bio-based mode).
 */
struct nullb_cmd {
	struct request *rq;	/* blk-mq request backing this command, if any */
	struct bio *bio;	/* bio backing this command (bio-based mode) */
	unsigned int tag;	/* command tag */
	blk_status_t error;	/* completion status to report */
	struct nullb_queue *nq;	/* queue this command was issued on */
	struct hrtimer timer;	/* timer for delayed completion (completion_nsec) */
	bool fake_timeout;	/* NOTE(review): presumably suppresses completion to
				 * simulate a timeout (fault injection) — confirm */
};
27
/*
 * Per-hardware-queue context, one per submission queue of a nullb device.
 */
struct nullb_queue {
	unsigned long *tag_map;		/* bitmap of in-use tags — assumed bio-mode
					 * tag allocator; confirm against callers */
	wait_queue_head_t wait;		/* waiters for a free tag */
	unsigned int queue_depth;	/* number of tags / entries in cmds[] */
	struct nullb_device *dev;	/* owning device */
	unsigned int requeue_selection;	/* presumably a rotor used when spreading
					 * requeued commands — confirm */

	struct nullb_cmd *cmds;		/* array of queue_depth command structures */
};
37
/*
 * State of a single emulated zone of a zoned nullb device.
 */
struct nullb_zone {
	/*
	 * Zone lock to prevent concurrent modification of a zone write
	 * pointer position and condition: with memory backing, a write
	 * command execution may sleep on memory allocation. For this case,
	 * use mutex as the zone lock. Otherwise, use the spinlock for
	 * locking the zone.
	 */
	union {
		spinlock_t spinlock;
		struct mutex mutex;
	};
	enum blk_zone_type type;	/* conventional or sequential-write zone */
	enum blk_zone_cond cond;	/* current zone condition (empty/open/full/...) */
	sector_t start;			/* first sector of the zone */
	sector_t wp;			/* write pointer position */
	unsigned int len;		/* zone length — in sectors, presumably
					 * (cf. zone_size_sects); confirm */
	unsigned int capacity;		/* usable capacity, <= len */
};
57
/*
 * Per-device configuration and state. Exposed and configured through
 * configfs via the embedded config_item.
 */
struct nullb_device {
	struct nullb *nullb;		/* live device instance, if created */
	struct config_item item;	/* configfs handle for this device */
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;	/* current cache usage — units unclear from
					 * here (cache_size is MB); confirm */
	struct badblocks badblocks;	/* simulated bad-block ranges */

	/* Zoned-device state (valid only when zoned == true). */
	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_zones_imp_open;	/* zones currently implicitly open */
	unsigned int nr_zones_exp_open;	/* zones currently explicitly open */
	unsigned int nr_zones_closed;	/* zones currently closed */
	unsigned int imp_close_zone_no;	/* presumably a rotor for choosing which
					 * implicitly-open zone to close — confirm */
	struct nullb_zone *zones;	/* array of nr_zones zone descriptors */
	sector_t zone_size_sects;	/* zone size in sectors */
	unsigned int zone_size_sects_shift; /* log2(zone_size_sects), presumably
					 * for fast sector-to-zone math — confirm */
	bool need_zone_res_mgmt;	/* open/active zone limits are in effect */
	spinlock_t zone_res_lock;	/* protects the zone accounting above */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int zone_max_open; /* max number of open zones */
	unsigned int zone_max_active; /* max number of active zones */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int max_sectors; /* Max sectors per command */
	unsigned int max_segment_size; /* Max size of a single DMA segment. */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if support discard */
	bool zoned; /* if device is zoned */
	bool virt_boundary; /* virtual boundary on/off for the device */
};
104
/*
 * A live null_blk device instance: the request queue, gendisk and
 * per-queue state backing one nullb disk.
 */
struct nullb {
	struct nullb_device *dev;	/* configuration this instance was built from */
	struct list_head list;		/* entry in the driver's device list */
	unsigned int index;		/* disk index */
	struct request_queue *q;	/* block layer request queue */
	struct gendisk *disk;		/* associated gendisk */
	struct blk_mq_tag_set *tag_set;	/* active tag set; may point at __tag_set
					 * or a shared set — confirm */
	struct blk_mq_tag_set __tag_set; /* per-device tag set storage */
	unsigned int queue_depth;	/* depth of each nullb_queue */
	atomic_long_t cur_bytes;	/* presumably the byte budget left in the
					 * current throttle window (mbps) — confirm */
	struct hrtimer bw_timer;	/* bandwidth-throttling timer */
	unsigned long cache_flush_pos;	/* presumably the scan position for cache
					 * writeback — confirm */
	spinlock_t lock;		/* protects this instance's shared state */

	struct nullb_queue *queues;	/* array of nr_queues queue contexts */
	unsigned int nr_queues;		/* number of entries in queues[] */
	char disk_name[DISK_NAME_LEN];	/* block device name */
};
123
/* Core I/O handlers, implemented in the main null_blk code. */
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
				 sector_t nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd,
			      enum req_opf op, sector_t sector,
			      unsigned int nr_sectors);
129
#ifdef CONFIG_BLK_DEV_ZONED
/* Zoned-device support; implementations live in the zoned-specific code. */
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
				    enum req_opf op, sector_t sector,
				    sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len);
#else
/*
 * !CONFIG_BLK_DEV_ZONED stub: configuring a zoned nullb device without
 * zoned block device support is an error.
 */
static inline int null_init_zoned_dev(struct nullb_device *dev,
				      struct request_queue *q)
{
	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
	return -EINVAL;
}
/* Stub: no zoned device can exist to register in this configuration. */
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
/* Stub: nothing was allocated, so nothing to free. */
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
/* Stub: zone commands are not supported without zoned support. */
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
			enum req_opf op, sector_t sector, sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
/* Stub: with no zones there is no zone boundary limiting a read. */
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	return len;
}
164 #define null_report_zones NULL
165 #endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __BLK_NULL_BLK_H */
167