// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
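
/*
 * These NULL-terminated string lists back the enum-style sysfs attributes
 * below: bch_snprint_string_list() renders a list for show() with the
 * current selection marked, and bch_read_string_list() maps a written
 * string back to its index for store().
 */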

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);
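
/*
 * The read_attribute()/write_attribute()/rw_attribute() macros (from
 * sysfs.h) each declare a static struct attribute named sysfs_<name>
 * with the matching permission bits; the SHOW()/STORE() handlers below
 * dispatch on those addresses.
 */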

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,	dc->writeback_rate.rate << 9);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);

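	/*
	 * writeback_rate_debug dumps the state of the writeback PD
	 * controller: the current rate, how much dirty data there is
	 * versus the target, the proportional and derivative terms,
	 * the resulting rate change, and when the next writeback I/O
	 * is due. Sector counts are shifted by 9 into bytes for
	 * human-readable printing.
	 */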
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char derivative[20];
		char change[20];
		s64 next_io;

		bch_hprint(rate,	dc->writeback_rate.rate << 9);
		bch_hprint(dirty,	bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);
		bch_hprint(proportional, dc->writeback_rate_proportional << 9);
		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
		bch_hprint(change,	dc->writeback_rate_change << 9);

		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
				    NSEC_PER_MSEC);

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "derivative:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       derivative, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	dc->disk.stripe_size << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		/* sb.label may not be NUL-terminated; terminate it before strcat() */
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, INT_MAX);

	d_strtoul_nonzero(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

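	/*
	 * Writing the label updates the backing device's superblock,
	 * mirrors the new label into the cache set's uuid entry (if the
	 * device is attached), and emits a change uevent so userspace
	 * (e.g. udev rules) can react to the rename.
	 */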
	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(
			&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
		kfree(env);
	}

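	/*
	 * Attaching expects the target cache set's UUID; walk every
	 * registered cache set until one accepts this backing device.
	 */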
	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

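	/*
	 * Apply the change under bch_register_lock, then poke the
	 * writeback machinery so a change to writeback_running or
	 * writeback_percent takes effect immediately.
	 */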
	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);
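
/*
 * Example of driving these attributes from userspace (hypothetical
 * device name; the directory exists once a bcache device is
 * registered):
 *
 *   echo writeback > /sys/block/bcache0/bcache/cache_mode
 *   echo 10        > /sys/block/bcache0/bcache/writeback_percent
 *   cat              /sys/block/bcache0/bcache/state
 */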

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		/* u->label may not be NUL-terminated; terminate it before strcat() */
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

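	/*
	 * Resizing a flash-only volume: parse a human-readable size,
	 * store it in 512-byte sectors, persist the uuid entry, and
	 * update the block device's advertised capacity.
	 */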
	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

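/*
 * bset_tree_stats is gathered by walking every btree node with
 * bch_btree_map_nodes() and accumulating the per-node bset statistics
 * into a single bset_stats_op.
 */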
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

static unsigned bch_root_usage(struct cache_set *c)
{
	unsigned bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

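	/*
	 * Take a read lock on the current root. The root can be replaced
	 * (e.g. by a node split) between reading c->root and acquiring
	 * the lock, so retry until the node we locked is still the root.
	 */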
	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned bch_cache_max_chain(struct cache_set *c)
{
	unsigned ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only runs when
		 * sectors_to_gc < 0. When users write to the trigger_gc
		 * sysfs entry, they usually want to force garbage
		 * collection to run, so set c->sectors_to_gc to -1 to
		 * give gc_should_run() a chance to let the gc thread
		 * proceed. "A chance" because between here and
		 * gc_should_run(), c->sectors_to_gc may still be set to
		 * some other positive value, so writing trigger_gc does
		 * not guarantee that the gc thread actually runs.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		ssize_t v = bch_read_string_list(buf, error_actions);

		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}
		return ret;
	}

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

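/*
 * The "internal" kobject is embedded in struct cache_set and simply
 * forwards show/store to the parent cache set's handlers; it exists
 * so the debugging attributes live in a separate sysfs subdirectory.
 */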
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

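	/*
	 * priority_stats: snapshot every bucket's priority into a
	 * vmalloc'd copy, sort it descending (via a gcc nested-function
	 * comparator), strip zero-priority (unused) buckets and btree
	 * buckets, then report usage percentages, the average of
	 * INITIAL_PRIO - prio over the cached buckets, and 31 quantiles
	 * of the remainder.
	 */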
	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

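	/*
	 * The in-memory discard flag is honoured only if the underlying
	 * queue actually supports discard, but the preference is always
	 * persisted in the superblock.
	 */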
	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);
928