// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

extern bool bcache_is_reboot;

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static const char * const bch_reada_cache_policies[] = {
	"all",
	"meta-only",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

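/*
 * The write_attribute()/read_attribute()/rw_attribute() macros (from
 * sysfs.h) declare the sysfs attribute objects referenced below as
 * &sysfs_<name>; the SHOW() and STORE() handlers dispatch on those
 * addresses.
 */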
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(gc_after_writeback);
rw_attribute(size);

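/*
 * Print each entry of a NULL-terminated string list into buf, with the
 * selected entry wrapped in brackets (e.g. "writethrough [writeback]
 * writearound none"), then replace the trailing space with a newline.
 */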
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += snprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}

SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_readahead_cache_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_reada_cache_policies,
					       dc->cache_readahead_policy);

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
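	/*
	 * Rates are tracked internally in 512-byte sectors; shift left by 9
	 * to report bytes/sec, and report 0 while writeback is not running.
	 */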
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_printf(io_errors,		"%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	 ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_name) {
		snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_uuid) {
		/* convert binary uuid into 36-byte string plus '\0' */
		snprintf(buf, 36+1, "%pU", dc->sb.uuid);
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

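	/*
	 * Each sysfs_strtoul*() invocation below embeds an "attr ==
	 * &sysfs_<name>" check (see sysfs.h) and, on a match, parses buf
	 * into the named field and returns.
	 */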
	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
	sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
	sysfs_strtoul_bool(writeback_running, dc->writeback_running);
	sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
			    0, bch_cutoff_writeback);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf)) {
		v = bch_cached_dev_run(dc);
		if (v)
			return v;
	}

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_readahead_cache_policy) {
		v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != dc->cache_readahead_policy)
			dc->cache_readahead_policy = v;
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

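	/*
	 * Writing the label updates the backing device superblock and, if
	 * attached, the cache set's uuid entry, then emits a KOBJ_CHANGE
	 * uevent so user space can react to the new label.
	 */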
	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

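	/*
	 * Attach to a cache set by UUID: try each registered cache set in
	 * turn and stop at the first successful attach; -ENOENT here means
	 * no registered set matched the UUID.
	 */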
	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

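/*
 * Wrapper around __cached_dev_store() that takes bch_register_lock and does
 * the follow-up work (kicking the writeback kthread, scheduling the rate
 * update worker) once the store itself has succeeded.
 */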
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * reject setting it to 1 via sysfs if writeback
			 * kthread is not created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread",
						dc->disk.disk->disk_name);
			}
		} else
			/*
			 * writeback kthread will check if dc->writeback_running
			 * is true or false.
			 */
			bch_writeback_queue(dc);
	}

	/*
	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is attached
	 * to a cache set; otherwise it doesn't make sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_readahead_cache_policy,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	&sysfs_backing_dev_name,
	&sysfs_backing_dev_uuid,
	NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	sysfs_strtoul(data_csum,	d->data_csum);

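	/*
	 * The size attribute accepts a human-readable byte count (parsed by
	 * strtoi_h_or_return()) and is stored as 512-byte sectors.
	 */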
	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

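/*
 * Walk every btree node via bch_btree_map_nodes(), accumulating per-node
 * bset statistics, and format the totals for the bset_tree_stats file.
 */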
static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

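	/*
	 * Take a read lock on the root node; if the root changed while we
	 * slept on the lock, drop it and retry until we hold the current
	 * root.
	 */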
	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

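/*
 * Report the longest chain in c->bucket_hash, a rough measure of how evenly
 * the cached btree nodes are distributed across the hash table.
 */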
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(reclaimed_journal_buckets,
		    atomic_long_read(&c->reclaimed_journal_buckets));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(gc_after_writeback,	"%i", c->gc_after_writeback);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		force_wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul_clamp(congested_read_threshold_us,
			    c->congested_read_threshold_us,
			    0, UINT_MAX);
	sysfs_strtoul_clamp(congested_write_threshold_us,
			    c->congested_write_threshold_us,
			    0, UINT_MAX);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}
		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul_clamp(journal_delay_ms,
			    c->journal_delay_ms,
			    0, USHRT_MAX);
	sysfs_strtoul_bool(verify,		c->verify);
	sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul_bool(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul_bool(copy_gc_enabled,	c->copy_gc_enabled);
	/*
	 * Writing to gc_after_writeback here may clear an already-set
	 * BCH_DO_AUTO_GC flag; that is harmless, because the flag will
	 * simply be set again at the next opportunity.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

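/*
 * The internal kobject is embedded in struct cache_set and freed with it,
 * so its release method is intentionally a no-op.
 */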
static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_reclaimed_journal_buckets,
	&sysfs_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_gc_after_writeback,
	&sysfs_io_disable,
	&sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	NULL
};
KTYPE(bch_cache_set_internal);

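/*
 * sort() comparator for the priority_stats buffer: descending bucket
 * priority. The cond_resched() keeps a long sort over nbuckets entries from
 * monopolizing the CPU.
 */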
static int __bch_cache_cmp(const void *l, const void *r)
{
	cond_resched();
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

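		/*
		 * Sort priorities in descending order, then trim zero
		 * (unused) priorities from the tail and BTREE_PRIO entries
		 * from the head so that only cached data buckets feed the
		 * average and the quantiles below.
		 */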
		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			sum = div64_u64(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);