// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct gendisk *disk, char *page);
	int (*load_module)(struct gendisk *disk, const char *page, size_t count);
	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
	int (*store_limit)(struct gendisk *disk, const char *page,
			size_t count, struct queue_limits *lim);
};

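/*
 * Helpers shared by most of the attributes below: parse or print a single
 * decimal unsigned long.  queue_var_store() rejects values that do not fit
 * in an unsigned int and, like any sysfs ->store method, returns the
 * number of bytes consumed on success.
 */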
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk->queue->nr_requests, page);
}

static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(disk->queue))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(disk->queue, nr);
	if (err)
		return err;

	return ret;
}

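/*
 * read_ahead_kb is exposed in kilobytes while bdi->ra_pages counts
 * PAGE_SIZE units, so the value is converted with a shift by
 * (PAGE_SHIFT - 10) in each direction.
 */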
static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
}

static ssize_t
queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW(max_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
QUEUE_SYSFS_LIMIT_SHOW(io_min)
QUEUE_SYSFS_LIMIT_SHOW(io_opt)
QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%llu\n",					\
		(unsigned long long)disk->queue->limits._field <<	\
			SECTOR_SHIFT);					\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field >> 1, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)

#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%d\n", _val);				\
}

/* deprecated fields */
QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)

static int queue_max_discard_sectors_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long max_discard_bytes;
	ssize_t ret;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	return 0;
}

/*
 * For zone append queue_max_zone_append_sectors does not just return the
 * underlying queue limits, but actually contains a calculation.  Because of
 * that we can't simply use QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES here.
 */
static ssize_t queue_zone_append_max_show(struct gendisk *disk, char *page)
{
	return sprintf(page, "%llu\n",
		(u64)queue_max_zone_append_sectors(disk->queue) <<
			SECTOR_SHIFT);
}

static int
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
		struct queue_limits *lim)
{
	unsigned long max_sectors_kb;
	ssize_t ret;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	lim->max_user_sectors = max_sectors_kb << 1;
	return 0;
}

static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim, blk_features_t feature)
{
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val)
		lim->features |= feature;
	else
		lim->features &= ~feature;
	return 0;
}

#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}									\
static int queue_##_name##_store(struct gendisk *disk,			\
		const char *page, size_t count, struct queue_limits *lim) \
{									\
	return queue_feature_store(disk, page, count, lim, _feature);	\
}

QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES)

#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}

QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA)
QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX)

static ssize_t queue_poll_show(struct gendisk *disk, char *page)
{
	if (queue_is_mq(disk->queue))
		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));
	return sysfs_emit(page, "%u\n",
		!!(disk->queue->limits.features & BLK_FEAT_POLL));
}

static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
	if (blk_queue_is_zoned(disk->queue))
		return sprintf(page, "host-managed\n");
	return sprintf(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk_nr_zones(disk), page);
}

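/*
 * nomerges encodes two flags: 0 enables all merging, 1 disables only the
 * lookup-based merge attempts (QUEUE_FLAG_NOXMERGES) while keeping the
 * simple one-hit cache merges, and 2 disables request merging entirely
 * (QUEUE_FLAG_NOMERGES).
 */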
static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
			       blk_queue_noxmerges(disk->queue), page);
}

static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, disk->queue);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, disk->queue);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, disk->queue);

	return ret;
}

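/*
 * rq_affinity: 0 completes requests on any CPU, 1 completes on a CPU in
 * the same group as the submitter (QUEUE_FLAG_SAME_COMP), and 2 forces
 * completion on the exact submitting CPU (QUEUE_FLAG_SAME_FORCE).  The
 * show method reconstructs that value as set << force.
 */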
static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	struct request_queue *q = disk->queue;
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
				size_t count)
{
	return count;
}

static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
				size_t count)
{
	if (!(disk->queue->limits.features & BLK_FEAT_POLL))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

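/*
 * io_timeout is exposed in milliseconds and stored internally as jiffies;
 * writes of zero are rejected.
 */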
static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(disk->queue->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(disk->queue, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
	if (blk_queue_write_cache(disk->queue))
		return sprintf(page, "write back\n");
	return sprintf(page, "write through\n");
}

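/*
 * Accept "write back" to enable the write cache and "write through" (or
 * "none") to disable it.  The strncmp() prefix match means any string
 * starting with one of these values is accepted as well.
 */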
static int queue_wc_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim)
{
	bool disable;

	if (!strncmp(page, "write back", 10)) {
		disable = false;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		disable = true;
	} else {
		return -EINVAL;
	}

	if (disable)
		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
	else
		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
	return 0;
}

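/*
 * Attribute definition helpers: RO entries only implement ->show, RW
 * entries add ->store, LIM_RW entries route writes through ->store_limit
 * so they are validated and committed via the queue_limits update API,
 * and LOAD_MODULE entries may need to load a module (e.g. an I/O
 * scheduler) before the actual store.
 */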
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
}

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
}

#define QUEUE_LIM_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr		= { .name = _name, .mode = 0644 },	\
	.show		= _prefix##_show,			\
	.store_limit	= _prefix##_store,			\
}

#define QUEUE_RW_LOAD_MODULE_ENTRY(_prefix, _name)		\
static struct queue_sysfs_entry _prefix##_entry = {		\
	.attr		= { .name = _name, .mode = 0644 },	\
	.show		= _prefix##_show,			\
	.load_module	= _prefix##_load_module,		\
	.store		= _prefix##_store,			\
}

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_LOAD_MODULE_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_boundary_sectors,
		"atomic_write_boundary_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

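/*
 * wbt_lat_usec is exposed in microseconds while wbt tracks its latency
 * target in nanoseconds.  Writing -1 restores the default target for the
 * device type, and a show value of 0 means wbt is disabled.
 */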
static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
	if (!wbt_rq_qos(disk->queue))
		return -EINVAL;

	if (wbt_disabled(disk->queue))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n",
		div_u64(wbt_get_min_lat(disk->queue), 1000));
}

static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	struct request_queue *q = disk->queue;
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);

	return count;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_max_discard_sectors_entry.attr,
	&queue_max_hw_discard_sectors_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_atomic_write_max_sectors_entry.attr,
	&queue_atomic_write_boundary_sectors_entry.attr,
	&queue_atomic_write_unit_min_entry.attr,
	&queue_atomic_write_unit_max_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_max_write_zeroes_sectors_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_rotational_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_add_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};

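/*
 * The ->is_visible callbacks hide attributes that do not apply: the zone
 * limits on non-zoned devices, and the blk-mq attributes (including
 * io_timeout when the driver has no ->timeout handler) on bio-based
 * queues.
 */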
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&disk->queue->sysfs_lock);
	res = entry->show(disk, page);
	mutex_unlock(&disk->queue->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store_limit && !entry->store)
		return -EIO;

	/*
	 * If the attribute needs to load a module, do it before freezing the
	 * queue to ensure that the module file can be read when the request
	 * queue is the one for the device storing the module file.
	 */
	if (entry->load_module) {
		res = entry->load_module(disk, page, length);
		if (res)
			return res;
	}

	if (entry->store_limit) {
		struct queue_limits lim = queue_limits_start_update(q);

		res = entry->store_limit(disk, page, length, &lim);
		if (res < 0) {
			queue_limits_cancel_update(q);
			return res;
		}

		res = queue_limits_commit_update_frozen(q, &lim);
		if (res)
			return res;
		return length;
	}

	mutex_lock(&q->sysfs_lock);
	blk_mq_freeze_queue(q);
	res = entry->store(disk, page, length);
	blk_mq_unfreeze_queue(q);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
	percpu_ref_switch_to_percpu(&q->q_usage_counter);

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is
 * called after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
	kobject_put(&disk->queue_kobj);
}