// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static const char * const bch_reada_cache_policies[] = {
	"all",
	"meta-only",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

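/*
 * Attribute declarations. These macros come from bcache's sysfs.h and
 * declare "struct attribute" objects named sysfs_<name>: write_attribute()
 * for write-only files, read_attribute() for read-only files and
 * rw_attribute() for read-write files.
 */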
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

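/*
 * Print each entry of a NULL-terminated string list into buf, separated by
 * spaces, with the selected entry wrapped in brackets (e.g. "a [b] c\n").
 * Returns the number of bytes written.
 */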
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += scnprintf(out, buf + size - out,
				 i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}

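/*
 * SHOW()/STORE() (from sysfs.h) expand to show/store methods for a
 * kobject; the *_LOCKED variants wrap the __-prefixed version with
 * bch_register_lock held.
 */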
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_readahead_cache_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_reada_cache_policies,
					       dc->cache_readahead_policy);

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_printf(io_errors,		"%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	 ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

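/*
 * Store path for cached device attributes. The d_strtoul*() helpers below
 * parse the user buffer into the matching struct cached_dev field when
 * attr names that field.
 */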
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_readahead_cache_policy) {
		v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != dc->cache_readahead_policy)
			dc->cache_readahead_policy = v;
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found\n", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

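/*
 * Locked wrapper: takes bch_register_lock around __cached_dev_store() and
 * handles the side effects that must follow the store (kicking the
 * writeback thread, rescheduling the rate-update worker).
 */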
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	/*
	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is
	 * attached to a cache set; otherwise it doesn't make sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_readahead_cache_policy,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

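/*
 * btree map callback: invoked once per btree node; accumulates per-node
 * bset statistics into the enclosing bset_stats_op.
 */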
static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

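/*
 * Take a read lock on the root node and compute what percentage of its
 * bytes are used by keys. The root may be replaced (e.g. by a split)
 * while we sleep on the lock, so retry until the node we locked is
 * still c->root.
 */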
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

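/* Total bytes of memory currently held by the in-memory btree node cache. */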
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

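/*
 * Walk every chain of the btree node hash table and report the longest
 * one; a long chain suggests the hash is unbalanced.
 */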
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

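/*
 * Percentage of allocated btree node space occupied by key bytes, and the
 * average data size per key, both taken from the last GC's statistics.
 */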
static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only runs when
		 * sectors_to_gc < 0; when users write to the trigger_gc
		 * sysfs entry, most of the time they want to forcibly
		 * trigger garbage collection. Here c->sectors_to_gc is
		 * set to -1 to give gc_should_run() a chance to permit
		 * the gc thread to run. "A chance" because, before
		 * gc_should_run() is reached, c->sectors_to_gc may still
		 * be set to another positive value, so writing to
		 * trigger_gc doesn't guarantee that a gc run takes
		 * effect.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}
		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set\n");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
		}
	}

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

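/*
 * The "internal" kobject is embedded in struct cache_set and freed along
 * with it, so there is nothing to do on release.
 */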
static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);

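/* Sort comparator for bucket priorities: descending uint16_t order. */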
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

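		/*
		 * After the descending sort, zero priorities sit at the
		 * tail; drop them (unused buckets), then skip past the
		 * leading BTREE_PRIO entries (btree buckets) so the
		 * quantiles cover only cached data buckets.
		 */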
		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);