1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bcache setup/teardown code, and some metadata io - read a superblock and
4  * figure out what to do with it.
5  *
6  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7  * Copyright 2012 Google, Inc.
8  */
9 
10 #include "bcache.h"
11 #include "btree.h"
12 #include "debug.h"
13 #include "extents.h"
14 #include "request.h"
15 #include "writeback.h"
16 #include "features.h"
17 
18 #include <linux/blkdev.h>
19 #include <linux/debugfs.h>
20 #include <linux/genhd.h>
21 #include <linux/idr.h>
22 #include <linux/kthread.h>
23 #include <linux/workqueue.h>
24 #include <linux/module.h>
25 #include <linux/random.h>
26 #include <linux/reboot.h>
27 #include <linux/sysfs.h>
28 
29 unsigned int bch_cutoff_writeback;
30 unsigned int bch_cutoff_writeback_sync;
31 
32 static const char bcache_magic[] = {
33 	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
34 	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
35 };
36 
37 static const char invalid_uuid[] = {
38 	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
39 	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
40 };
41 
42 static struct kobject *bcache_kobj;
43 struct mutex bch_register_lock;
44 bool bcache_is_reboot;
45 LIST_HEAD(bch_cache_sets);
46 static LIST_HEAD(uncached_devices);
47 
48 static int bcache_major;
49 static DEFINE_IDA(bcache_device_idx);
50 static wait_queue_head_t unregister_wait;
51 struct workqueue_struct *bcache_wq;
52 struct workqueue_struct *bch_flush_wq;
53 struct workqueue_struct *bch_journal_wq;
54 
55 
56 #define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
57 /* limit on the number of partitions on a single bcache device */
58 #define BCACHE_MINORS		128
59 /* limit on the number of bcache devices on a single system */
60 #define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)
61 
62 /* Superblock */
63 
64 static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s)
65 {
66 	unsigned int bucket_size = le16_to_cpu(s->bucket_size);
67 
68 	if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
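		/*
		 * Two encodings exist for large buckets: with the
		 * large_bucket feature s->bucket_size holds log2 of the
		 * bucket size in sectors (e.g. order 13 -> 8192 sectors),
		 * while the obsoleted variant below splits a 32 bit size
		 * across bucket_size (low 16 bits) and obso_bucket_size_hi
		 * (high 16 bits).
		 */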
69 		if (bch_has_feature_large_bucket(sb)) {
70 			unsigned int max, order;
71 
72 			max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
73 			order = le16_to_cpu(s->bucket_size);
74 			/*
75 			 * The bcache tools ensure this overflow won't
76 			 * happen; an error message here is enough.
77 			 */
78 			if (order > max)
79 				pr_err("Bucket size (1 << %u) overflows\n",
80 					order);
81 			bucket_size = 1 << order;
82 		} else if (bch_has_feature_obso_large_bucket(sb)) {
83 			bucket_size +=
84 				le16_to_cpu(s->obso_bucket_size_hi) << 16;
85 		}
86 	}
87 
88 	return bucket_size;
89 }
90 
91 static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev,
92 				     struct cache_sb_disk *s)
93 {
94 	const char *err;
95 	unsigned int i;
96 
97 	sb->first_bucket	= le16_to_cpu(s->first_bucket);
98 	sb->nbuckets	= le64_to_cpu(s->nbuckets);
99 	sb->bucket_size	= get_bucket_size(sb, s);
100 
101 	sb->nr_in_set	= le16_to_cpu(s->nr_in_set);
102 	sb->nr_this_dev	= le16_to_cpu(s->nr_this_dev);
103 
104 	err = "Too many journal buckets";
105 	if (sb->keys > SB_JOURNAL_BUCKETS)
106 		goto err;
107 
108 	err = "Too many buckets";
109 	if (sb->nbuckets > LONG_MAX)
110 		goto err;
111 
112 	err = "Not enough buckets";
113 	if (sb->nbuckets < 1 << 7)
114 		goto err;
115 
116 	err = "Bad block size (not power of 2)";
117 	if (!is_power_of_2(sb->block_size))
118 		goto err;
119 
120 	err = "Bad block size (larger than page size)";
121 	if (sb->block_size > PAGE_SECTORS)
122 		goto err;
123 
124 	err = "Bad bucket size (not power of 2)";
125 	if (!is_power_of_2(sb->bucket_size))
126 		goto err;
127 
128 	err = "Bad bucket size (smaller than page size)";
129 	if (sb->bucket_size < PAGE_SECTORS)
130 		goto err;
131 
132 	err = "Invalid superblock: device too small";
133 	if (get_capacity(bdev->bd_disk) <
134 	    sb->bucket_size * sb->nbuckets)
135 		goto err;
136 
137 	err = "Bad UUID";
138 	if (bch_is_zero(sb->set_uuid, 16))
139 		goto err;
140 
141 	err = "Bad cache device number in set";
142 	if (!sb->nr_in_set ||
143 	    sb->nr_in_set <= sb->nr_this_dev ||
144 	    sb->nr_in_set > MAX_CACHES_PER_SET)
145 		goto err;
146 
147 	err = "Journal buckets not sequential";
148 	for (i = 0; i < sb->keys; i++)
149 		if (sb->d[i] != sb->first_bucket + i)
150 			goto err;
151 
152 	err = "Too many journal buckets";
153 	if (sb->first_bucket + sb->keys > sb->nbuckets)
154 		goto err;
155 
156 	err = "Invalid superblock: first bucket comes before end of super";
157 	if (sb->first_bucket * sb->bucket_size < 16)
158 		goto err;
159 
160 	err = NULL;
161 err:
162 	return err;
163 }
164 
165 
166 static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
167 			      struct cache_sb_disk **res)
168 {
169 	const char *err;
170 	struct cache_sb_disk *s;
171 	struct page *page;
172 	unsigned int i;
173 
174 	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
175 				   SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
176 	if (IS_ERR(page))
177 		return "IO error";
178 	s = page_address(page) + offset_in_page(SB_OFFSET);
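	/*
	 * The page reference from read_cache_page_gfp() is kept and handed
	 * back to the caller via *res on success; it is only dropped here
	 * on the error path (see the put_page() below).
	 */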
179 
180 	sb->offset		= le64_to_cpu(s->offset);
181 	sb->version		= le64_to_cpu(s->version);
182 
183 	memcpy(sb->magic,	s->magic, 16);
184 	memcpy(sb->uuid,	s->uuid, 16);
185 	memcpy(sb->set_uuid,	s->set_uuid, 16);
186 	memcpy(sb->label,	s->label, SB_LABEL_SIZE);
187 
188 	sb->flags		= le64_to_cpu(s->flags);
189 	sb->seq			= le64_to_cpu(s->seq);
190 	sb->last_mount		= le32_to_cpu(s->last_mount);
191 	sb->keys		= le16_to_cpu(s->keys);
192 
193 	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
194 		sb->d[i] = le64_to_cpu(s->d[i]);
195 
196 	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
197 		 sb->version, sb->flags, sb->seq, sb->keys);
198 
199 	err = "Not a bcache superblock (bad offset)";
200 	if (sb->offset != SB_SECTOR)
201 		goto err;
202 
203 	err = "Not a bcache superblock (bad magic)";
204 	if (memcmp(sb->magic, bcache_magic, 16))
205 		goto err;
206 
207 	err = "Bad checksum";
208 	if (s->csum != csum_set(s))
209 		goto err;
210 
211 	err = "Bad UUID";
212 	if (bch_is_zero(sb->uuid, 16))
213 		goto err;
214 
215 	sb->block_size	= le16_to_cpu(s->block_size);
216 
217 	err = "Superblock block size smaller than device block size";
218 	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
219 		goto err;
220 
221 	switch (sb->version) {
222 	case BCACHE_SB_VERSION_BDEV:
223 		sb->data_offset	= BDEV_DATA_START_DEFAULT;
224 		break;
225 	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
226 	case BCACHE_SB_VERSION_BDEV_WITH_FEATURES:
227 		sb->data_offset	= le64_to_cpu(s->data_offset);
228 
229 		err = "Bad data offset";
230 		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
231 			goto err;
232 
233 		break;
234 	case BCACHE_SB_VERSION_CDEV:
235 	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
236 		err = read_super_common(sb, bdev, s);
237 		if (err)
238 			goto err;
239 		break;
240 	case BCACHE_SB_VERSION_CDEV_WITH_FEATURES:
241 		/*
242 		 * Feature bits are needed in read_super_common(),
243 		 * so convert them first.
244 		 */
245 		sb->feature_compat = le64_to_cpu(s->feature_compat);
246 		sb->feature_incompat = le64_to_cpu(s->feature_incompat);
247 		sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
248 
249 		/* Check for unknown feature bits */
250 		err = "Unsupported compatible feature found";
251 		if (bch_has_unknown_compat_features(sb))
252 			goto err;
253 
254 		err = "Unsupported read-only compatible feature found";
255 		if (bch_has_unknown_ro_compat_features(sb))
256 			goto err;
257 
258 		err = "Unsupported incompatible feature found";
259 		if (bch_has_unknown_incompat_features(sb))
260 			goto err;
261 
262 		err = read_super_common(sb, bdev, s);
263 		if (err)
264 			goto err;
265 		break;
266 	default:
267 		err = "Unsupported superblock version";
268 		goto err;
269 	}
270 
271 	sb->last_mount = (u32)ktime_get_real_seconds();
272 	*res = s;
273 	return NULL;
274 err:
275 	put_page(page);
276 	return err;
277 }
278 
279 static void write_bdev_super_endio(struct bio *bio)
280 {
281 	struct cached_dev *dc = bio->bi_private;
282 
283 	if (bio->bi_status)
284 		bch_count_backing_io_errors(dc, bio);
285 
286 	closure_put(&dc->sb_write);
287 }
288 
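/*
 * Callers (bch_write_bdev_super() and bcache_write_super()) hold their
 * sb_write_mutex and have already initialized the bio and taken a closure
 * reference. Only fields that may change at runtime are copied into the
 * on-disk buffer; the remaining fields keep the values read at
 * registration time. The checksum is filled in last.
 */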
289 static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
290 		struct bio *bio)
291 {
292 	unsigned int i;
293 
294 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
295 	bio->bi_iter.bi_sector	= SB_SECTOR;
296 	__bio_add_page(bio, virt_to_page(out), SB_SIZE,
297 			offset_in_page(out));
298 
299 	out->offset		= cpu_to_le64(sb->offset);
300 
301 	memcpy(out->uuid,	sb->uuid, 16);
302 	memcpy(out->set_uuid,	sb->set_uuid, 16);
303 	memcpy(out->label,	sb->label, SB_LABEL_SIZE);
304 
305 	out->flags		= cpu_to_le64(sb->flags);
306 	out->seq		= cpu_to_le64(sb->seq);
307 
308 	out->last_mount		= cpu_to_le32(sb->last_mount);
309 	out->first_bucket	= cpu_to_le16(sb->first_bucket);
310 	out->keys		= cpu_to_le16(sb->keys);
311 
312 	for (i = 0; i < sb->keys; i++)
313 		out->d[i] = cpu_to_le64(sb->d[i]);
314 
315 	if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
316 		out->feature_compat    = cpu_to_le64(sb->feature_compat);
317 		out->feature_incompat  = cpu_to_le64(sb->feature_incompat);
318 		out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat);
319 	}
320 
321 	out->version		= cpu_to_le64(sb->version);
322 	out->csum = csum_set(out);
323 
324 	pr_debug("ver %llu, flags %llu, seq %llu\n",
325 		 sb->version, sb->flags, sb->seq);
326 
327 	submit_bio(bio);
328 }
329 
330 static void bch_write_bdev_super_unlock(struct closure *cl)
331 {
332 	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
333 
334 	up(&dc->sb_write_mutex);
335 }
336 
337 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
338 {
339 	struct closure *cl = &dc->sb_write;
340 	struct bio *bio = &dc->sb_bio;
341 
342 	down(&dc->sb_write_mutex);
343 	closure_init(cl, parent);
344 
345 	bio_init(bio, dc->sb_bv, 1);
346 	bio_set_dev(bio, dc->bdev);
347 	bio->bi_end_io	= write_bdev_super_endio;
348 	bio->bi_private = dc;
349 
350 	closure_get(cl);
351 	/* I/O request sent to backing device */
352 	__write_super(&dc->sb, dc->sb_disk, bio);
353 
354 	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
355 }
356 
357 static void write_super_endio(struct bio *bio)
358 {
359 	struct cache *ca = bio->bi_private;
360 
361 	/* is_read = 0 */
362 	bch_count_io_errors(ca, bio->bi_status, 0,
363 			    "writing superblock");
364 	closure_put(&ca->set->sb_write);
365 }
366 
367 static void bcache_write_super_unlock(struct closure *cl)
368 {
369 	struct cache_set *c = container_of(cl, struct cache_set, sb_write);
370 
371 	up(&c->sb_write_mutex);
372 }
373 
374 void bcache_write_super(struct cache_set *c)
375 {
376 	struct closure *cl = &c->sb_write;
377 	struct cache *ca = c->cache;
378 	struct bio *bio = &ca->sb_bio;
379 	unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
380 
381 	down(&c->sb_write_mutex);
382 	closure_init(cl, &c->cl);
383 
384 	ca->sb.seq++;
385 
386 	if (ca->sb.version < version)
387 		ca->sb.version = version;
388 
389 	bio_init(bio, ca->sb_bv, 1);
390 	bio_set_dev(bio, ca->bdev);
391 	bio->bi_end_io	= write_super_endio;
392 	bio->bi_private = ca;
393 
394 	closure_get(cl);
395 	__write_super(&ca->sb, ca->sb_disk, bio);
396 
397 	closure_return_with_destructor(cl, bcache_write_super_unlock);
398 }
399 
400 /* UUID io */
401 
402 static void uuid_endio(struct bio *bio)
403 {
404 	struct closure *cl = bio->bi_private;
405 	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
406 
407 	cache_set_err_on(bio->bi_status, c, "accessing uuids");
408 	bch_bbio_free(bio, c);
409 	closure_put(cl);
410 }
411 
412 static void uuid_io_unlock(struct closure *cl)
413 {
414 	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
415 
416 	up(&c->uuid_write_mutex);
417 }
418 
419 static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
420 		    struct bkey *k, struct closure *parent)
421 {
422 	struct closure *cl = &c->uuid_write;
423 	struct uuid_entry *u;
424 	unsigned int i;
425 	char buf[80];
426 
427 	BUG_ON(!parent);
428 	down(&c->uuid_write_mutex);
429 	closure_init(cl, parent);
430 
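	/*
	 * Write the uuid array to every pointer (replica) in the key; a
	 * read only needs one copy, so stop after submitting the first bio.
	 */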
431 	for (i = 0; i < KEY_PTRS(k); i++) {
432 		struct bio *bio = bch_bbio_alloc(c);
433 
434 		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
435 		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
436 
437 		bio->bi_end_io	= uuid_endio;
438 		bio->bi_private = cl;
439 		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
440 		bch_bio_map(bio, c->uuids);
441 
442 		bch_submit_bbio(bio, c, k, i);
443 
444 		if (op != REQ_OP_WRITE)
445 			break;
446 	}
447 
448 	bch_extent_to_text(buf, sizeof(buf), k);
449 	pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);
450 
451 	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
452 		if (!bch_is_zero(u->uuid, 16))
453 			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
454 				 u - c->uuids, u->uuid, u->label,
455 				 u->first_reg, u->last_reg, u->invalidated);
456 
457 	closure_return_with_destructor(cl, uuid_io_unlock);
458 }
459 
460 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
461 {
462 	struct bkey *k = &j->uuid_bucket;
463 
464 	if (__bch_btree_ptr_invalid(c, k))
465 		return "bad uuid pointer";
466 
467 	bkey_copy(&c->uuid_bucket, k);
468 	uuid_io(c, REQ_OP_READ, 0, k, cl);
469 
470 	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
471 		struct uuid_entry_v0	*u0 = (void *) c->uuids;
472 		struct uuid_entry	*u1 = (void *) c->uuids;
473 		int i;
474 
475 		closure_sync(cl);
476 
477 		/*
478 		 * Since the new uuid entry is bigger than the old, we have to
479 		 * convert starting at the highest memory address and work down
480 		 * in order to do it in place
481 		 */
482 
483 		for (i = c->nr_uuids - 1;
484 		     i >= 0;
485 		     --i) {
486 			memcpy(u1[i].uuid,	u0[i].uuid, 16);
487 			memcpy(u1[i].label,	u0[i].label, 32);
488 
489 			u1[i].first_reg		= u0[i].first_reg;
490 			u1[i].last_reg		= u0[i].last_reg;
491 			u1[i].invalidated	= u0[i].invalidated;
492 
493 			u1[i].flags	= 0;
494 			u1[i].sectors	= 0;
495 		}
496 	}
497 
498 	return NULL;
499 }
500 
501 static int __uuid_write(struct cache_set *c)
502 {
503 	BKEY_PADDED(key) k;
504 	struct closure cl;
505 	struct cache *ca = c->cache;
506 	unsigned int size;
507 
508 	closure_init_stack(&cl);
509 	lockdep_assert_held(&bch_register_lock);
510 
511 	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
512 		return 1;
513 
514 	size =  meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
515 	SET_KEY_SIZE(&k.key, size);
516 	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
517 	closure_sync(&cl);
518 
519 	/* Only one bucket used for uuid write */
520 	atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
521 
522 	bkey_copy(&c->uuid_bucket, &k.key);
523 	bkey_put(c, &k.key);
524 	return 0;
525 }
526 
527 int bch_uuid_write(struct cache_set *c)
528 {
529 	int ret = __uuid_write(c);
530 
531 	if (!ret)
532 		bch_journal_meta(c, NULL);
533 
534 	return ret;
535 }
536 
537 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
538 {
539 	struct uuid_entry *u;
540 
541 	for (u = c->uuids;
542 	     u < c->uuids + c->nr_uuids; u++)
543 		if (!memcmp(u->uuid, uuid, 16))
544 			return u;
545 
546 	return NULL;
547 }
548 
549 static struct uuid_entry *uuid_find_empty(struct cache_set *c)
550 {
551 	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
552 
553 	return uuid_find(c, zero_uuid);
554 }
555 
556 /*
557  * Bucket priorities/gens:
558  *
559  * For each bucket, we store on disk its
560  *   8 bit gen
561  *  16 bit priority
562  *
563  * See alloc.c for an explanation of the gen. The priority is used to implement
564  * lru (and in the future other) cache replacement policies; for most purposes
565  * it's just an opaque integer.
566  *
567  * The gens and the priorities don't have a whole lot to do with each other, and
568  * it's actually the gens that must be written out at specific times - it's no
569  * big deal if the priorities don't get written, if we lose them we just reuse
570  * buckets in suboptimal order.
571  *
572  * On disk they're stored in a packed array, in as many buckets as are required
573  * to fit them all. The buckets we use to store them form a list; the journal
574  * header points to the first bucket, the first bucket points to the second
575  * bucket, et cetera.
576  *
577  * This code is used by the allocation code; periodically (whenever it runs out
578  * of buckets to allocate from) the allocation code will invalidate some
579  * buckets, but it can't use those buckets until their new gens are safely on
580  * disk.
581  */
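/*
 * Roughly, each bucket of priorities on disk holds a small header followed
 * by a packed array of entries (see struct prio_set and struct bucket_disk
 * in bcache.h for the authoritative layout):
 *
 *	csum, magic, seq, next_bucket
 *	data[]: one { __u16 prio; __u8 gen; } entry per bucket
 */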
582 
583 static void prio_endio(struct bio *bio)
584 {
585 	struct cache *ca = bio->bi_private;
586 
587 	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
588 	bch_bbio_free(bio, ca->set);
589 	closure_put(&ca->prio);
590 }
591 
592 static void prio_io(struct cache *ca, uint64_t bucket, int op,
593 		    unsigned long op_flags)
594 {
595 	struct closure *cl = &ca->prio;
596 	struct bio *bio = bch_bbio_alloc(ca->set);
597 
598 	closure_init_stack(cl);
599 
600 	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
601 	bio_set_dev(bio, ca->bdev);
602 	bio->bi_iter.bi_size	= meta_bucket_bytes(&ca->sb);
603 
604 	bio->bi_end_io	= prio_endio;
605 	bio->bi_private = ca;
606 	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
607 	bch_bio_map(bio, ca->disk_buckets);
608 
609 	closure_bio_submit(ca->set, bio, &ca->prio);
610 	closure_sync(cl);
611 }
612 
613 int bch_prio_write(struct cache *ca, bool wait)
614 {
615 	int i;
616 	struct bucket *b;
617 	struct closure cl;
618 
619 	pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
620 		 fifo_used(&ca->free[RESERVE_PRIO]),
621 		 fifo_used(&ca->free[RESERVE_NONE]),
622 		 fifo_used(&ca->free_inc));
623 
624 	/*
625 	 * Pre-check if there are enough free buckets. In the non-blocking
626 	 * scenario it's better to fail early rather than starting to allocate
627 	 * buckets and do a cleanup later in case of failure.
628 	 */
629 	if (!wait) {
630 		size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
631 			       fifo_used(&ca->free[RESERVE_NONE]);
632 		if (prio_buckets(ca) > avail)
633 			return -ENOMEM;
634 	}
635 
636 	closure_init_stack(&cl);
637 
638 	lockdep_assert_held(&ca->set->bucket_lock);
639 
640 	ca->disk_buckets->seq++;
641 
642 	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
643 			&ca->meta_sectors_written);
644 
645 	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
646 		long bucket;
647 		struct prio_set *p = ca->disk_buckets;
648 		struct bucket_disk *d = p->data;
649 		struct bucket_disk *end = d + prios_per_bucket(ca);
650 
651 		for (b = ca->buckets + i * prios_per_bucket(ca);
652 		     b < ca->buckets + ca->sb.nbuckets && d < end;
653 		     b++, d++) {
654 			d->prio = cpu_to_le16(b->prio);
655 			d->gen = b->gen;
656 		}
657 
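		/*
		 * Prio buckets are written from the last chunk to the first,
		 * so each one can record, in next_bucket, the bucket holding
		 * the chunk after it; prio_read() walks this chain starting
		 * from the bucket recorded in the journal.
		 */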
658 		p->next_bucket	= ca->prio_buckets[i + 1];
659 		p->magic	= pset_magic(&ca->sb);
660 		p->csum		= bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);
661 
662 		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
663 		BUG_ON(bucket == -1);
664 
665 		mutex_unlock(&ca->set->bucket_lock);
666 		prio_io(ca, bucket, REQ_OP_WRITE, 0);
667 		mutex_lock(&ca->set->bucket_lock);
668 
669 		ca->prio_buckets[i] = bucket;
670 		atomic_dec_bug(&ca->buckets[bucket].pin);
671 	}
672 
673 	mutex_unlock(&ca->set->bucket_lock);
674 
675 	bch_journal_meta(ca->set, &cl);
676 	closure_sync(&cl);
677 
678 	mutex_lock(&ca->set->bucket_lock);
679 
680 	/*
681 	 * Don't let the old priorities get garbage collected until the new
682 	 * ones have been written out and journalled
683 	 */
684 	for (i = 0; i < prio_buckets(ca); i++) {
685 		if (ca->prio_last_buckets[i])
686 			__bch_bucket_free(ca,
687 				&ca->buckets[ca->prio_last_buckets[i]]);
688 
689 		ca->prio_last_buckets[i] = ca->prio_buckets[i];
690 	}
691 	return 0;
692 }
693 
694 static int prio_read(struct cache *ca, uint64_t bucket)
695 {
696 	struct prio_set *p = ca->disk_buckets;
697 	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
698 	struct bucket *b;
699 	unsigned int bucket_nr = 0;
700 	int ret = -EIO;
701 
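	/*
	 * d starts equal to end, so the very first loop iteration reads the
	 * first prio bucket before any entries are consumed; after that a
	 * new bucket is read each time the current one is exhausted.
	 */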
702 	for (b = ca->buckets;
703 	     b < ca->buckets + ca->sb.nbuckets;
704 	     b++, d++) {
705 		if (d == end) {
706 			ca->prio_buckets[bucket_nr] = bucket;
707 			ca->prio_last_buckets[bucket_nr] = bucket;
708 			bucket_nr++;
709 
710 			prio_io(ca, bucket, REQ_OP_READ, 0);
711 
712 			if (p->csum !=
713 			    bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
714 				pr_warn("bad csum reading priorities\n");
715 				goto out;
716 			}
717 
718 			if (p->magic != pset_magic(&ca->sb)) {
719 				pr_warn("bad magic reading priorities\n");
720 				goto out;
721 			}
722 
723 			bucket = p->next_bucket;
724 			d = p->data;
725 		}
726 
727 		b->prio = le16_to_cpu(d->prio);
728 		b->gen = b->last_gc = d->gen;
729 	}
730 
731 	ret = 0;
732 out:
733 	return ret;
734 }
735 
736 /* Bcache device */
737 
738 static int open_dev(struct block_device *b, fmode_t mode)
739 {
740 	struct bcache_device *d = b->bd_disk->private_data;
741 
742 	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
743 		return -ENXIO;
744 
745 	closure_get(&d->cl);
746 	return 0;
747 }
748 
749 static void release_dev(struct gendisk *b, fmode_t mode)
750 {
751 	struct bcache_device *d = b->private_data;
752 
753 	closure_put(&d->cl);
754 }
755 
756 static int ioctl_dev(struct block_device *b, fmode_t mode,
757 		     unsigned int cmd, unsigned long arg)
758 {
759 	struct bcache_device *d = b->bd_disk->private_data;
760 
761 	return d->ioctl(d, mode, cmd, arg);
762 }
763 
764 static const struct block_device_operations bcache_cached_ops = {
765 	.submit_bio	= cached_dev_submit_bio,
766 	.open		= open_dev,
767 	.release	= release_dev,
768 	.ioctl		= ioctl_dev,
769 	.owner		= THIS_MODULE,
770 };
771 
772 static const struct block_device_operations bcache_flash_ops = {
773 	.submit_bio	= flash_dev_submit_bio,
774 	.open		= open_dev,
775 	.release	= release_dev,
776 	.ioctl		= ioctl_dev,
777 	.owner		= THIS_MODULE,
778 };
779 
780 void bcache_device_stop(struct bcache_device *d)
781 {
782 	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
783 		/*
784 		 * closure_fn set to
785 		 * - cached device: cached_dev_flush()
786 		 * - flash dev: flash_dev_flush()
787 		 */
788 		closure_queue(&d->cl);
789 }
790 
791 static void bcache_device_unlink(struct bcache_device *d)
792 {
793 	lockdep_assert_held(&bch_register_lock);
794 
795 	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
796 		struct cache *ca = d->c->cache;
797 
798 		sysfs_remove_link(&d->c->kobj, d->name);
799 		sysfs_remove_link(&d->kobj, "cache");
800 
801 		bd_unlink_disk_holder(ca->bdev, d->disk);
802 	}
803 }
804 
805 static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
806 			       const char *name)
807 {
808 	struct cache *ca = c->cache;
809 	int ret;
810 
811 	bd_link_disk_holder(ca->bdev, d->disk);
812 
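	/*
	 * Build the name used for the sysfs links below, e.g. "bdev0" for a
	 * cached device or "volume0" for a flash-only volume.
	 */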
813 	snprintf(d->name, BCACHEDEVNAME_SIZE,
814 		 "%s%u", name, d->id);
815 
816 	ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
817 	if (ret < 0)
818 		pr_err("Couldn't create device -> cache set symlink\n");
819 
820 	ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
821 	if (ret < 0)
822 		pr_err("Couldn't create cache set -> device symlink\n");
823 
824 	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
825 }
826 
827 static void bcache_device_detach(struct bcache_device *d)
828 {
829 	lockdep_assert_held(&bch_register_lock);
830 
831 	atomic_dec(&d->c->attached_dev_nr);
832 
833 	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
834 		struct uuid_entry *u = d->c->uuids + d->id;
835 
836 		SET_UUID_FLASH_ONLY(u, 0);
837 		memcpy(u->uuid, invalid_uuid, 16);
838 		u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
839 		bch_uuid_write(d->c);
840 	}
841 
842 	bcache_device_unlink(d);
843 
844 	d->c->devices[d->id] = NULL;
845 	closure_put(&d->c->caching);
846 	d->c = NULL;
847 }
848 
849 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
850 				 unsigned int id)
851 {
852 	d->id = id;
853 	d->c = c;
854 	c->devices[id] = d;
855 
856 	if (id >= c->devices_max_used)
857 		c->devices_max_used = id + 1;
858 
859 	closure_get(&c->caching);
860 }
861 
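/*
 * Each bcache device reserves BCACHE_MINORS minor numbers, so a device
 * index and its first minor number are related by a factor of BCACHE_MINORS.
 */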
862 static inline int first_minor_to_idx(int first_minor)
863 {
864 	return (first_minor/BCACHE_MINORS);
865 }
866 
867 static inline int idx_to_first_minor(int idx)
868 {
869 	return (idx * BCACHE_MINORS);
870 }
871 
872 static void bcache_device_free(struct bcache_device *d)
873 {
874 	struct gendisk *disk = d->disk;
875 
876 	lockdep_assert_held(&bch_register_lock);
877 
878 	if (disk)
879 		pr_info("%s stopped\n", disk->disk_name);
880 	else
881 		pr_err("bcache device (NULL gendisk) stopped\n");
882 
883 	if (d->c)
884 		bcache_device_detach(d);
885 
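	/*
	 * GENHD_FL_UP is only set once add_disk() has run, so use it to
	 * decide whether del_gendisk()/put_disk() are required here.
	 */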
886 	if (disk) {
887 		bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
888 
889 		if (disk_added)
890 			del_gendisk(disk);
891 
892 		if (disk->queue)
893 			blk_cleanup_queue(disk->queue);
894 
895 		ida_simple_remove(&bcache_device_idx,
896 				  first_minor_to_idx(disk->first_minor));
897 		if (disk_added)
898 			put_disk(disk);
899 	}
900 
901 	bioset_exit(&d->bio_split);
902 	kvfree(d->full_dirty_stripes);
903 	kvfree(d->stripe_sectors_dirty);
904 
905 	closure_debug_destroy(&d->cl);
906 }
907 
908 static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
909 		sector_t sectors, struct block_device *cached_bdev,
910 		const struct block_device_operations *ops)
911 {
912 	struct request_queue *q;
913 	const size_t max_stripes = min_t(size_t, INT_MAX,
914 					 SIZE_MAX / sizeof(atomic_t));
915 	uint64_t n;
916 	int idx;
917 
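	/*
	 * If no stripe size was set up (the backing device reports no
	 * optimal I/O size, or this is a flash-only volume), fall back to a
	 * huge stripe of 2^31 sectors so the device is covered by very few
	 * stripes.
	 */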
918 	if (!d->stripe_size)
919 		d->stripe_size = 1 << 31;
920 
921 	n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
922 	if (!n || n > max_stripes) {
923 		pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
924 			n);
925 		return -ENOMEM;
926 	}
927 	d->nr_stripes = n;
928 
929 	n = d->nr_stripes * sizeof(atomic_t);
930 	d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
931 	if (!d->stripe_sectors_dirty)
932 		return -ENOMEM;
933 
934 	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
935 	d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
936 	if (!d->full_dirty_stripes)
937 		goto out_free_stripe_sectors_dirty;
938 
939 	idx = ida_simple_get(&bcache_device_idx, 0,
940 				BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
941 	if (idx < 0)
942 		goto out_free_full_dirty_stripes;
943 
944 	if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
945 			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
946 		goto out_ida_remove;
947 
948 	d->disk = alloc_disk(BCACHE_MINORS);
949 	if (!d->disk)
950 		goto out_bioset_exit;
951 
952 	set_capacity(d->disk, sectors);
953 	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
954 
955 	d->disk->major		= bcache_major;
956 	d->disk->first_minor	= idx_to_first_minor(idx);
957 	d->disk->fops		= ops;
958 	d->disk->private_data	= d;
959 
960 	q = blk_alloc_queue(NUMA_NO_NODE);
961 	if (!q)
962 		return -ENOMEM;
963 
964 	d->disk->queue			= q;
965 	q->limits.max_hw_sectors	= UINT_MAX;
966 	q->limits.max_sectors		= UINT_MAX;
967 	q->limits.max_segment_size	= UINT_MAX;
968 	q->limits.max_segments		= BIO_MAX_PAGES;
969 	blk_queue_max_discard_sectors(q, UINT_MAX);
970 	q->limits.discard_granularity	= 512;
971 	q->limits.io_min		= block_size;
972 	q->limits.logical_block_size	= block_size;
973 	q->limits.physical_block_size	= block_size;
974 
975 	if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
976 		/*
977 		 * This should only happen with BCACHE_SB_VERSION_BDEV.
978 		 * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
979 		 */
980 		pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
981 			d->disk->disk_name, q->limits.logical_block_size,
982 			PAGE_SIZE, bdev_logical_block_size(cached_bdev));
983 
984 		/* This also adjusts physical block size/min io size if needed */
985 		blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
986 	}
987 
988 	blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
989 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
990 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
991 
992 	blk_queue_write_cache(q, true, true);
993 
994 	return 0;
995 
996 out_bioset_exit:
997 	bioset_exit(&d->bio_split);
998 out_ida_remove:
999 	ida_simple_remove(&bcache_device_idx, idx);
1000 out_free_full_dirty_stripes:
1001 	kvfree(d->full_dirty_stripes);
1002 out_free_stripe_sectors_dirty:
1003 	kvfree(d->stripe_sectors_dirty);
1004 	return -ENOMEM;
1005 
1006 }
1007 
1008 /* Cached device */
1009 
1010 static void calc_cached_dev_sectors(struct cache_set *c)
1011 {
1012 	uint64_t sectors = 0;
1013 	struct cached_dev *dc;
1014 
1015 	list_for_each_entry(dc, &c->cached_devs, list)
1016 		sectors += bdev_sectors(dc->bdev);
1017 
1018 	c->cached_dev_sectors = sectors;
1019 }
1020 
1021 #define BACKING_DEV_OFFLINE_TIMEOUT 5
1022 static int cached_dev_status_update(void *arg)
1023 {
1024 	struct cached_dev *dc = arg;
1025 	struct request_queue *q;
1026 
1027 	/*
1028 	 * If this kthread is being stopped from elsewhere, quit right away.
1029 	 * dc->io_disable might be set via the sysfs interface, so check it
1030 	 * here too.
1031 	 */
1032 	while (!kthread_should_stop() && !dc->io_disable) {
1033 		q = bdev_get_queue(dc->bdev);
1034 		if (blk_queue_dying(q))
1035 			dc->offline_seconds++;
1036 		else
1037 			dc->offline_seconds = 0;
1038 
1039 		if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
1040 			pr_err("%s: device offline for %d seconds\n",
1041 			       dc->backing_dev_name,
1042 			       BACKING_DEV_OFFLINE_TIMEOUT);
1043 			pr_err("%s: disable I/O request due to backing device offline\n",
1044 			       dc->disk.name);
1045 			dc->io_disable = true;
1046 			/* make sure others see io_disable == true as early as possible */
1047 			smp_mb();
1048 			bcache_device_stop(&dc->disk);
1049 			break;
1050 		}
1051 		schedule_timeout_interruptible(HZ);
1052 	}
1053 
1054 	wait_for_kthread_stop();
1055 	return 0;
1056 }
1057 
1058 
1059 int bch_cached_dev_run(struct cached_dev *dc)
1060 {
1061 	struct bcache_device *d = &dc->disk;
1062 	char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
1063 	char *env[] = {
1064 		"DRIVER=bcache",
1065 		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
1066 		kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""),
1067 		NULL,
1068 	};
1069 
1070 	if (dc->io_disable) {
1071 		pr_err("I/O disabled on cached dev %s\n",
1072 		       dc->backing_dev_name);
1073 		kfree(env[1]);
1074 		kfree(env[2]);
1075 		kfree(buf);
1076 		return -EIO;
1077 	}
1078 
1079 	if (atomic_xchg(&dc->running, 1)) {
1080 		kfree(env[1]);
1081 		kfree(env[2]);
1082 		kfree(buf);
1083 		pr_info("cached dev %s is running already\n",
1084 		       dc->backing_dev_name);
1085 		return -EBUSY;
1086 	}
1087 
1088 	if (!d->c &&
1089 	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
1090 		struct closure cl;
1091 
1092 		closure_init_stack(&cl);
1093 
1094 		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
1095 		bch_write_bdev_super(dc, &cl);
1096 		closure_sync(&cl);
1097 	}
1098 
1099 	add_disk(d->disk);
1100 	bd_link_disk_holder(dc->bdev, dc->disk.disk);
1101 	/*
1102 	 * These won't show up in the uevent file; use udevadm monitor -e to
1103 	 * see them, as only class / kset properties are persistent
1104 	 */
1105 	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
1106 	kfree(env[1]);
1107 	kfree(env[2]);
1108 	kfree(buf);
1109 
1110 	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
1111 	    sysfs_create_link(&disk_to_dev(d->disk)->kobj,
1112 			      &d->kobj, "bcache")) {
1113 		pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
1114 		return -ENOMEM;
1115 	}
1116 
1117 	dc->status_update_thread = kthread_run(cached_dev_status_update,
1118 					       dc, "bcache_status_update");
1119 	if (IS_ERR(dc->status_update_thread)) {
1120 		pr_warn("failed to create bcache_status_update kthread, continuing to run without monitoring backing device status\n");
1121 	}
1122 
1123 	return 0;
1124 }
1125 
1126 /*
1127  * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed
1128  * work dc->writeback_rate_update is running. Wait until the routine
1129  * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
1130  * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out
1131  * seconds, give up waiting here and continue to cancel it too.
1132  */
1133 static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
1134 {
1135 	int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;
1136 
1137 	do {
1138 		if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
1139 			      &dc->disk.flags))
1140 			break;
1141 		time_out--;
1142 		schedule_timeout_interruptible(1);
1143 	} while (time_out > 0);
1144 
1145 	if (time_out == 0)
1146 		pr_warn("give up waiting for dc->writeback_rate_update to quit\n");
1147 
1148 	cancel_delayed_work_sync(&dc->writeback_rate_update);
1149 }
1150 
1151 static void cached_dev_detach_finish(struct work_struct *w)
1152 {
1153 	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
1154 	struct closure cl;
1155 
1156 	closure_init_stack(&cl);
1157 
1158 	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
1159 	BUG_ON(refcount_read(&dc->count));
1160 
1161 
1162 	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
1163 		cancel_writeback_rate_update_dwork(dc);
1164 
1165 	if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
1166 		kthread_stop(dc->writeback_thread);
1167 		dc->writeback_thread = NULL;
1168 	}
1169 
1170 	memset(&dc->sb.set_uuid, 0, 16);
1171 	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
1172 
1173 	bch_write_bdev_super(dc, &cl);
1174 	closure_sync(&cl);
1175 
1176 	mutex_lock(&bch_register_lock);
1177 
1178 	calc_cached_dev_sectors(dc->disk.c);
1179 	bcache_device_detach(&dc->disk);
1180 	list_move(&dc->list, &uncached_devices);
1181 
1182 	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
1183 	clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
1184 
1185 	mutex_unlock(&bch_register_lock);
1186 
1187 	pr_info("Caching disabled for %s\n", dc->backing_dev_name);
1188 
1189 	/* Drop ref we took in cached_dev_detach() */
1190 	closure_put(&dc->disk.cl);
1191 }
1192 
1193 void bch_cached_dev_detach(struct cached_dev *dc)
1194 {
1195 	lockdep_assert_held(&bch_register_lock);
1196 
1197 	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
1198 		return;
1199 
1200 	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
1201 		return;
1202 
1203 	/*
1204 	 * Block the device from being closed and freed until we're finished
1205 	 * detaching
1206 	 */
1207 	closure_get(&dc->disk.cl);
1208 
1209 	bch_writeback_queue(dc);
1210 
1211 	cached_dev_put(dc);
1212 }
1213 
1214 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
1215 			  uint8_t *set_uuid)
1216 {
1217 	uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
1218 	struct uuid_entry *u;
1219 	struct cached_dev *exist_dc, *t;
1220 	int ret = 0;
1221 
1222 	if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
1223 	    (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
1224 		return -ENOENT;
1225 
1226 	if (dc->disk.c) {
1227 		pr_err("Can't attach %s: already attached\n",
1228 		       dc->backing_dev_name);
1229 		return -EINVAL;
1230 	}
1231 
1232 	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
1233 		pr_err("Can't attach %s: shutting down\n",
1234 		       dc->backing_dev_name);
1235 		return -EINVAL;
1236 	}
1237 
1238 	if (dc->sb.block_size < c->cache->sb.block_size) {
1239 		/* Will die */
1240 		pr_err("Couldn't attach %s: block size less than set's block size\n",
1241 		       dc->backing_dev_name);
1242 		return -EINVAL;
1243 	}
1244 
1245 	/* Check whether already attached */
1246 	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
1247 		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
1248 			pr_err("Tried to attach %s but duplicate UUID already attached\n",
1249 				dc->backing_dev_name);
1250 
1251 			return -EINVAL;
1252 		}
1253 	}
1254 
1255 	u = uuid_find(c, dc->sb.uuid);
1256 
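	/*
	 * If an entry exists but the backing device was cleanly detached
	 * (BDEV_STATE_NONE) or its cached data is stale, invalidate the old
	 * entry so a fresh slot is allocated below.
	 */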
1257 	if (u &&
1258 	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
1259 	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
1260 		memcpy(u->uuid, invalid_uuid, 16);
1261 		u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
1262 		u = NULL;
1263 	}
1264 
1265 	if (!u) {
1266 		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1267 			pr_err("Couldn't find uuid for %s in set\n",
1268 			       dc->backing_dev_name);
1269 			return -ENOENT;
1270 		}
1271 
1272 		u = uuid_find_empty(c);
1273 		if (!u) {
1274 			pr_err("Not caching %s, no room for UUID\n",
1275 			       dc->backing_dev_name);
1276 			return -EINVAL;
1277 		}
1278 	}
1279 
1280 	/*
1281 	 * Deadlocks since we're called via sysfs...
1282 	 * sysfs_remove_file(&dc->kobj, &sysfs_attach);
1283 	 */
1284 
1285 	if (bch_is_zero(u->uuid, 16)) {
1286 		struct closure cl;
1287 
1288 		closure_init_stack(&cl);
1289 
1290 		memcpy(u->uuid, dc->sb.uuid, 16);
1291 		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
1292 		u->first_reg = u->last_reg = rtime;
1293 		bch_uuid_write(c);
1294 
1295 		memcpy(dc->sb.set_uuid, c->set_uuid, 16);
1296 		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
1297 
1298 		bch_write_bdev_super(dc, &cl);
1299 		closure_sync(&cl);
1300 	} else {
1301 		u->last_reg = rtime;
1302 		bch_uuid_write(c);
1303 	}
1304 
1305 	bcache_device_attach(&dc->disk, c, u - c->uuids);
1306 	list_move(&dc->list, &c->cached_devs);
1307 	calc_cached_dev_sectors(c);
1308 
1309 	/*
1310 	 * dc->c must be set before dc->count != 0 - paired with the mb in
1311 	 * cached_dev_get()
1312 	 */
1313 	smp_wmb();
1314 	refcount_set(&dc->count, 1);
1315 
1316 	/* Block writeback thread, but spawn it */
1317 	down_write(&dc->writeback_lock);
1318 	if (bch_cached_dev_writeback_start(dc)) {
1319 		up_write(&dc->writeback_lock);
1320 		pr_err("Couldn't start writeback facilities for %s\n",
1321 		       dc->disk.disk->disk_name);
1322 		return -ENOMEM;
1323 	}
1324 
1325 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1326 		atomic_set(&dc->has_dirty, 1);
1327 		bch_writeback_queue(dc);
1328 	}
1329 
1330 	bch_sectors_dirty_init(&dc->disk);
1331 
1332 	ret = bch_cached_dev_run(dc);
1333 	if (ret && (ret != -EBUSY)) {
1334 		up_write(&dc->writeback_lock);
1335 		/*
1336 		 * bch_register_lock is held, so bcache_device_stop() cannot
1337 		 * be called directly. The kthread and kworker
1338 		 * created previously in bch_cached_dev_writeback_start()
1339 		 * have to be stopped manually here.
1340 		 */
1341 		kthread_stop(dc->writeback_thread);
1342 		cancel_writeback_rate_update_dwork(dc);
1343 		pr_err("Couldn't run cached device %s\n",
1344 		       dc->backing_dev_name);
1345 		return ret;
1346 	}
1347 
1348 	bcache_device_link(&dc->disk, c, "bdev");
1349 	atomic_inc(&c->attached_dev_nr);
1350 
1351 	if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
1352 		pr_err("The obsoleted large bucket layout is unsupported, setting the bcache device read-only\n");
1353 		pr_err("Please update to the latest bcache-tools to create the cache device\n");
1354 		set_disk_ro(dc->disk.disk, 1);
1355 	}
1356 
1357 	/* Allow the writeback thread to proceed */
1358 	up_write(&dc->writeback_lock);
1359 
1360 	pr_info("Caching %s as %s on set %pU\n",
1361 		dc->backing_dev_name,
1362 		dc->disk.disk->disk_name,
1363 		dc->disk.c->set_uuid);
1364 	return 0;
1365 }
1366 
1367 /* when dc->disk.kobj released */
1368 void bch_cached_dev_release(struct kobject *kobj)
1369 {
1370 	struct cached_dev *dc = container_of(kobj, struct cached_dev,
1371 					     disk.kobj);
1372 	kfree(dc);
1373 	module_put(THIS_MODULE);
1374 }
1375 
1376 static void cached_dev_free(struct closure *cl)
1377 {
1378 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
1379 
1380 	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
1381 		cancel_writeback_rate_update_dwork(dc);
1382 
1383 	if (!IS_ERR_OR_NULL(dc->writeback_thread))
1384 		kthread_stop(dc->writeback_thread);
1385 	if (!IS_ERR_OR_NULL(dc->status_update_thread))
1386 		kthread_stop(dc->status_update_thread);
1387 
1388 	mutex_lock(&bch_register_lock);
1389 
1390 	if (atomic_read(&dc->running))
1391 		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
1392 	bcache_device_free(&dc->disk);
1393 	list_del(&dc->list);
1394 
1395 	mutex_unlock(&bch_register_lock);
1396 
1397 	if (dc->sb_disk)
1398 		put_page(virt_to_page(dc->sb_disk));
1399 
1400 	if (!IS_ERR_OR_NULL(dc->bdev))
1401 		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1402 
1403 	wake_up(&unregister_wait);
1404 
1405 	kobject_put(&dc->disk.kobj);
1406 }
1407 
1408 static void cached_dev_flush(struct closure *cl)
1409 {
1410 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
1411 	struct bcache_device *d = &dc->disk;
1412 
1413 	mutex_lock(&bch_register_lock);
1414 	bcache_device_unlink(d);
1415 	mutex_unlock(&bch_register_lock);
1416 
1417 	bch_cache_accounting_destroy(&dc->accounting);
1418 	kobject_del(&d->kobj);
1419 
1420 	continue_at(cl, cached_dev_free, system_wq);
1421 }
1422 
1423 static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
1424 {
1425 	int ret;
1426 	struct io *io;
1427 	struct request_queue *q = bdev_get_queue(dc->bdev);
1428 
1429 	__module_get(THIS_MODULE);
1430 	INIT_LIST_HEAD(&dc->list);
1431 	closure_init(&dc->disk.cl, NULL);
1432 	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1433 	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
1434 	INIT_WORK(&dc->detach, cached_dev_detach_finish);
1435 	sema_init(&dc->sb_write_mutex, 1);
1436 	INIT_LIST_HEAD(&dc->io_lru);
1437 	spin_lock_init(&dc->io_lock);
1438 	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1439 
1440 	dc->sequential_cutoff		= 4 << 20;
1441 
1442 	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
1443 		list_add(&io->lru, &dc->io_lru);
1444 		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
1445 	}
1446 
1447 	dc->disk.stripe_size = q->limits.io_opt >> 9;
1448 
1449 	if (dc->disk.stripe_size)
1450 		dc->partial_stripes_expensive =
1451 			q->limits.raid_partial_stripes_expensive;
1452 
1453 	ret = bcache_device_init(&dc->disk, block_size,
1454 			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
1455 			 dc->bdev, &bcache_cached_ops);
1456 	if (ret)
1457 		return ret;
1458 
1459 	blk_queue_io_opt(dc->disk.disk->queue,
1460 		max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));
1461 
1462 	atomic_set(&dc->io_errors, 0);
1463 	dc->io_disable = false;
1464 	dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
1465 	/* default to auto */
1466 	dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;
1467 
1468 	bch_cached_dev_request_init(dc);
1469 	bch_cached_dev_writeback_init(dc);
1470 	return 0;
1471 }
1472 
1473 /* Cached device - bcache superblock */
1474 
1475 static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
1476 				 struct block_device *bdev,
1477 				 struct cached_dev *dc)
1478 {
1479 	const char *err = "cannot allocate memory";
1480 	struct cache_set *c;
1481 	int ret = -ENOMEM;
1482 
1483 	bdevname(bdev, dc->backing_dev_name);
1484 	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
1485 	dc->bdev = bdev;
1486 	dc->bdev->bd_holder = dc;
1487 	dc->sb_disk = sb_disk;
1488 
1489 	if (cached_dev_init(dc, sb->block_size << 9))
1490 		goto err;
1491 
1492 	err = "error creating kobject";
1493 	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
1494 			"bcache"))
1495 		goto err;
1496 	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
1497 		goto err;
1498 
1499 	pr_info("registered backing device %s\n", dc->backing_dev_name);
1500 
1501 	list_add(&dc->list, &uncached_devices);
1502 	/* attach to a matched cache set if it exists */
1503 	list_for_each_entry(c, &bch_cache_sets, list)
1504 		bch_cached_dev_attach(dc, c, NULL);
1505 
1506 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
1507 	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
1508 		err = "failed to run cached device";
1509 		ret = bch_cached_dev_run(dc);
1510 		if (ret)
1511 			goto err;
1512 	}
1513 
1514 	return 0;
1515 err:
1516 	pr_notice("error %s: %s\n", dc->backing_dev_name, err);
1517 	bcache_device_stop(&dc->disk);
1518 	return ret;
1519 }
1520 
1521 /* Flash only volumes */
1522 
1523 /* When d->kobj released */
1524 void bch_flash_dev_release(struct kobject *kobj)
1525 {
1526 	struct bcache_device *d = container_of(kobj, struct bcache_device,
1527 					       kobj);
1528 	kfree(d);
1529 }
1530 
1531 static void flash_dev_free(struct closure *cl)
1532 {
1533 	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
1534 
1535 	mutex_lock(&bch_register_lock);
1536 	atomic_long_sub(bcache_dev_sectors_dirty(d),
1537 			&d->c->flash_dev_dirty_sectors);
1538 	bcache_device_free(d);
1539 	mutex_unlock(&bch_register_lock);
1540 	kobject_put(&d->kobj);
1541 }
1542 
1543 static void flash_dev_flush(struct closure *cl)
1544 {
1545 	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
1546 
1547 	mutex_lock(&bch_register_lock);
1548 	bcache_device_unlink(d);
1549 	mutex_unlock(&bch_register_lock);
1550 	kobject_del(&d->kobj);
1551 	continue_at(cl, flash_dev_free, system_wq);
1552 }
1553 
1554 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
1555 {
1556 	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
1557 					  GFP_KERNEL);
1558 	if (!d)
1559 		return -ENOMEM;
1560 
1561 	closure_init(&d->cl, NULL);
1562 	set_closure_fn(&d->cl, flash_dev_flush, system_wq);
1563 
1564 	kobject_init(&d->kobj, &bch_flash_dev_ktype);
1565 
1566 	if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
1567 			NULL, &bcache_flash_ops))
1568 		goto err;
1569 
1570 	bcache_device_attach(d, c, u - c->uuids);
1571 	bch_sectors_dirty_init(d);
1572 	bch_flash_dev_request_init(d);
1573 	add_disk(d->disk);
1574 
1575 	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
1576 		goto err;
1577 
1578 	bcache_device_link(d, c, "volume");
1579 
1580 	if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
1581 		pr_err("The obsoleted large bucket layout is unsupported, setting the bcache device read-only\n");
1582 		pr_err("Please update to the latest bcache-tools to create the cache device\n");
1583 		set_disk_ro(d->disk, 1);
1584 	}
1585 
1586 	return 0;
1587 err:
1588 	kobject_put(&d->kobj);
1589 	return -ENOMEM;
1590 }
1591 
1592 static int flash_devs_run(struct cache_set *c)
1593 {
1594 	int ret = 0;
1595 	struct uuid_entry *u;
1596 
1597 	for (u = c->uuids;
1598 	     u < c->uuids + c->nr_uuids && !ret;
1599 	     u++)
1600 		if (UUID_FLASH_ONLY(u))
1601 			ret = flash_dev_run(c, u);
1602 
1603 	return ret;
1604 }
1605 
1606 int bch_flash_dev_create(struct cache_set *c, uint64_t size)
1607 {
1608 	struct uuid_entry *u;
1609 
1610 	if (test_bit(CACHE_SET_STOPPING, &c->flags))
1611 		return -EINTR;
1612 
1613 	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
1614 		return -EPERM;
1615 
1616 	u = uuid_find_empty(c);
1617 	if (!u) {
1618 		pr_err("Can't create volume, no room for UUID\n");
1619 		return -EINVAL;
1620 	}
1621 
1622 	get_random_bytes(u->uuid, 16);
1623 	memset(u->label, 0, 32);
1624 	u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());
1625 
1626 	SET_UUID_FLASH_ONLY(u, 1);
1627 	u->sectors = size >> 9;
1628 
1629 	bch_uuid_write(c);
1630 
1631 	return flash_dev_run(c, u);
1632 }
1633 
1634 bool bch_cached_dev_error(struct cached_dev *dc)
1635 {
1636 	if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
1637 		return false;
1638 
1639 	dc->io_disable = true;
1640 	/* make sure others see io_disable == true as early as possible */
1641 	smp_mb();
1642 
1643 	pr_err("stop %s: too many IO errors on backing device %s\n",
1644 	       dc->disk.disk->disk_name, dc->backing_dev_name);
1645 
1646 	bcache_device_stop(&dc->disk);
1647 	return true;
1648 }
1649 
1650 /* Cache set */
1651 
1652 __printf(2, 3)
1653 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
1654 {
1655 	struct va_format vaf;
1656 	va_list args;
1657 
1658 	if (c->on_error != ON_ERROR_PANIC &&
1659 	    test_bit(CACHE_SET_STOPPING, &c->flags))
1660 		return false;
1661 
1662 	if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
1663 		pr_info("CACHE_SET_IO_DISABLE already set\n");
1664 
1665 	/*
1666 	 * XXX: we can be called from atomic context
1667 	 * acquire_console_sem();
1668 	 */
1669 
1670 	va_start(args, fmt);
1671 
1672 	vaf.fmt = fmt;
1673 	vaf.va = &args;
1674 
1675 	pr_err("error on %pU: %pV, disabling caching\n",
1676 	       c->set_uuid, &vaf);
1677 
1678 	va_end(args);
1679 
1680 	if (c->on_error == ON_ERROR_PANIC)
1681 		panic("panic forced after error\n");
1682 
1683 	bch_cache_set_unregister(c);
1684 	return true;
1685 }
1686 
1687 /* When c->kobj released */
1688 void bch_cache_set_release(struct kobject *kobj)
1689 {
1690 	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
1691 
1692 	kfree(c);
1693 	module_put(THIS_MODULE);
1694 }
1695 
1696 static void cache_set_free(struct closure *cl)
1697 {
1698 	struct cache_set *c = container_of(cl, struct cache_set, cl);
1699 	struct cache *ca;
1700 
1701 	debugfs_remove(c->debug);
1702 
1703 	bch_open_buckets_free(c);
1704 	bch_btree_cache_free(c);
1705 	bch_journal_free(c);
1706 
1707 	mutex_lock(&bch_register_lock);
1708 	bch_bset_sort_state_free(&c->sort);
1709 	free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
1710 
1711 	ca = c->cache;
1712 	if (ca) {
1713 		ca->set = NULL;
1714 		c->cache = NULL;
1715 		kobject_put(&ca->kobj);
1716 	}
1717 
1718 
1719 	if (c->moving_gc_wq)
1720 		destroy_workqueue(c->moving_gc_wq);
1721 	bioset_exit(&c->bio_split);
1722 	mempool_exit(&c->fill_iter);
1723 	mempool_exit(&c->bio_meta);
1724 	mempool_exit(&c->search);
1725 	kfree(c->devices);
1726 
1727 	list_del(&c->list);
1728 	mutex_unlock(&bch_register_lock);
1729 
1730 	pr_info("Cache set %pU unregistered\n", c->set_uuid);
1731 	wake_up(&unregister_wait);
1732 
1733 	closure_debug_destroy(&c->cl);
1734 	kobject_put(&c->kobj);
1735 }
1736 
1737 static void cache_set_flush(struct closure *cl)
1738 {
1739 	struct cache_set *c = container_of(cl, struct cache_set, caching);
1740 	struct cache *ca = c->cache;
1741 	struct btree *b;
1742 
1743 	bch_cache_accounting_destroy(&c->accounting);
1744 
1745 	kobject_put(&c->internal);
1746 	kobject_del(&c->kobj);
1747 
1748 	if (!IS_ERR_OR_NULL(c->gc_thread))
1749 		kthread_stop(c->gc_thread);
1750 
1751 	if (!IS_ERR_OR_NULL(c->root))
1752 		list_add(&c->root->list, &c->btree_cache);
1753 
1754 	/*
1755 	 * Avoid flushing cached nodes if cache set is retiring
1756 	 * due to too many I/O errors detected.
1757 	 */
1758 	if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1759 		list_for_each_entry(b, &c->btree_cache, list) {
1760 			mutex_lock(&b->write_lock);
1761 			if (btree_node_dirty(b))
1762 				__bch_btree_node_write(b, NULL);
1763 			mutex_unlock(&b->write_lock);
1764 		}
1765 
1766 	if (ca->alloc_thread)
1767 		kthread_stop(ca->alloc_thread);
1768 
1769 	if (c->journal.cur) {
1770 		cancel_delayed_work_sync(&c->journal.work);
1771 		/* flush last journal entry if needed */
1772 		c->journal.work.work.func(&c->journal.work.work);
1773 	}
1774 
1775 	closure_return(cl);
1776 }
1777 
1778 /*
1779  * This function is only called when CACHE_SET_IO_DISABLE is set, which means
1780  * the cache set is unregistering due to too many I/O errors. In this condition,
1781  * the bcache device might be stopped; it depends on the stop_when_cache_set_failed
1782  * value and on whether the broken cache has dirty data:
1783  *
1784  * dc->stop_when_cache_set_failed    dc->has_dirty   stop bcache device
1785  *  BCH_CACHED_DEV_STOP_AUTO           0               NO
1786  *  BCH_CACHED_DEV_STOP_AUTO           1               YES
1787  *  BCH_CACHED_DEV_STOP_ALWAYS         0               YES
1788  *  BCH_CACHED_DEV_STOP_ALWAYS         1               YES
1789  *
1790  * The expected behavior is: if stop_when_cache_set_failed is configured to
1791  * "auto" via the sysfs interface, the bcache device will not be stopped as
1792  * long as the backing device is clean on the broken cache device.
1793  */
1794 static void conditional_stop_bcache_device(struct cache_set *c,
1795 					   struct bcache_device *d,
1796 					   struct cached_dev *dc)
1797 {
1798 	if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
1799 		pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
1800 			d->disk->disk_name, c->set_uuid);
1801 		bcache_device_stop(d);
1802 	} else if (atomic_read(&dc->has_dirty)) {
1803 		/*
1804 		 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
1805 		 * and dc->has_dirty == 1
1806 		 */
1807 		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n",
1808 			d->disk->disk_name);
1809 		/*
1810 		 * There might be a small time gap that cache set is
1811 		 * There might be a small time gap during which the cache
1812 		 * set is released but the bcache device is not. Inside
1813 		 * this gap, regular I/O requests go directly to the
1814 		 * backing device, since no cache set is attached. In
1815 		 * writeback mode with a dirty cache this may introduce
1816 		 * inconsistent data. Therefore, before calling
1817 		 * bcache_device_stop() due
1818 		 * explicitly set to true.
1819 		 */
1820 		dc->io_disable = true;
1821 		/* make sure others see io_disable == true as early as possible */
1822 		smp_mb();
1823 		bcache_device_stop(d);
1824 	} else {
1825 		/*
1826 		 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
1827 		 * and dc->has_dirty == 0
1828 		 */
1829 		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n",
1830 			d->disk->disk_name);
1831 	}
1832 }
1833 
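/*
 * First stage of cache set teardown: walk every device attached to the
 * cache set and either detach it (real backing devices while the set is
 * unregistering, optionally stopping it per conditional_stop_bcache_device())
 * or stop it outright, then continue to cache_set_flush().
 */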
1834 static void __cache_set_unregister(struct closure *cl)
1835 {
1836 	struct cache_set *c = container_of(cl, struct cache_set, caching);
1837 	struct cached_dev *dc;
1838 	struct bcache_device *d;
1839 	size_t i;
1840 
1841 	mutex_lock(&bch_register_lock);
1842 
1843 	for (i = 0; i < c->devices_max_used; i++) {
1844 		d = c->devices[i];
1845 		if (!d)
1846 			continue;
1847 
1848 		if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
1849 		    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
1850 			dc = container_of(d, struct cached_dev, disk);
1851 			bch_cached_dev_detach(dc);
1852 			if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1853 				conditional_stop_bcache_device(c, d, dc);
1854 		} else {
1855 			bcache_device_stop(d);
1856 		}
1857 	}
1858 
1859 	mutex_unlock(&bch_register_lock);
1860 
1861 	continue_at(cl, cache_set_flush, system_wq);
1862 }
1863 
1864 void bch_cache_set_stop(struct cache_set *c)
1865 {
1866 	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
1867 		/* closure_fn set to __cache_set_unregister() */
1868 		closure_queue(&c->caching);
1869 }
1870 
1871 void bch_cache_set_unregister(struct cache_set *c)
1872 {
1873 	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
1874 	bch_cache_set_stop(c);
1875 }
1876 
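/*
 * Allocate enough zeroed, physically contiguous pages (as a single compound
 * allocation) to hold one metadata bucket for the given superblock.
 */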
1877 #define alloc_meta_bucket_pages(gfp, sb)		\
1878 	((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
1879 
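/*
 * Allocate and initialise a cache_set from the cache device's in-memory
 * superblock. On any allocation failure the partially built set is torn
 * down via bch_cache_set_unregister() and NULL is returned.
 */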
1880 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1881 {
1882 	int iter_size;
1883 	struct cache *ca = container_of(sb, struct cache, sb);
1884 	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
1885 
1886 	if (!c)
1887 		return NULL;
1888 
1889 	__module_get(THIS_MODULE);
1890 	closure_init(&c->cl, NULL);
1891 	set_closure_fn(&c->cl, cache_set_free, system_wq);
1892 
1893 	closure_init(&c->caching, &c->cl);
1894 	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
1895 
1896 	/* Maybe create continue_at_noreturn() and use it here? */
1897 	closure_set_stopped(&c->cl);
1898 	closure_put(&c->cl);
1899 
1900 	kobject_init(&c->kobj, &bch_cache_set_ktype);
1901 	kobject_init(&c->internal, &bch_cache_set_internal_ktype);
1902 
1903 	bch_cache_accounting_init(&c->accounting, &c->cl);
1904 
1905 	memcpy(c->set_uuid, sb->set_uuid, 16);
1906 
1907 	c->cache		= ca;
1908 	c->cache->set		= c;
1909 	c->bucket_bits		= ilog2(sb->bucket_size);
1910 	c->block_bits		= ilog2(sb->block_size);
1911 	c->nr_uuids		= meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
1912 	c->devices_max_used	= 0;
1913 	atomic_set(&c->attached_dev_nr, 0);
1914 	c->btree_pages		= meta_bucket_pages(sb);
1915 	if (c->btree_pages > BTREE_MAX_PAGES)
1916 		c->btree_pages = max_t(int, c->btree_pages / 4,
1917 				       BTREE_MAX_PAGES);
1918 
1919 	sema_init(&c->sb_write_mutex, 1);
1920 	mutex_init(&c->bucket_lock);
1921 	init_waitqueue_head(&c->btree_cache_wait);
1922 	spin_lock_init(&c->btree_cannibalize_lock);
1923 	init_waitqueue_head(&c->bucket_wait);
1924 	init_waitqueue_head(&c->gc_wait);
1925 	sema_init(&c->uuid_write_mutex, 1);
1926 
1927 	spin_lock_init(&c->btree_gc_time.lock);
1928 	spin_lock_init(&c->btree_split_time.lock);
1929 	spin_lock_init(&c->btree_read_time.lock);
1930 
1931 	bch_moving_init_cache_set(c);
1932 
1933 	INIT_LIST_HEAD(&c->list);
1934 	INIT_LIST_HEAD(&c->cached_devs);
1935 	INIT_LIST_HEAD(&c->btree_cache);
1936 	INIT_LIST_HEAD(&c->btree_cache_freeable);
1937 	INIT_LIST_HEAD(&c->btree_cache_freed);
1938 	INIT_LIST_HEAD(&c->data_buckets);
1939 
1940 	iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
1941 		sizeof(struct btree_iter_set);
1942 
1943 	c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
1944 	if (!c->devices)
1945 		goto err;
1946 
1947 	if (mempool_init_slab_pool(&c->search, 32, bch_search_cache))
1948 		goto err;
1949 
1950 	if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
1951 			sizeof(struct bbio) +
1952 			sizeof(struct bio_vec) * meta_bucket_pages(sb)))
1953 		goto err;
1954 
1955 	if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
1956 		goto err;
1957 
1958 	if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
1959 			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
1960 		goto err;
1961 
1962 	c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
1963 	if (!c->uuids)
1964 		goto err;
1965 
1966 	c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
1967 	if (!c->moving_gc_wq)
1968 		goto err;
1969 
1970 	if (bch_journal_alloc(c))
1971 		goto err;
1972 
1973 	if (bch_btree_cache_alloc(c))
1974 		goto err;
1975 
1976 	if (bch_open_buckets_alloc(c))
1977 		goto err;
1978 
1979 	if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
1980 		goto err;
1981 
1982 	c->congested_read_threshold_us	= 2000;
1983 	c->congested_write_threshold_us	= 20000;
1984 	c->error_limit	= DEFAULT_IO_ERROR_LIMIT;
1985 	c->idle_max_writeback_rate_enabled = 1;
1986 	WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));
1987 
1988 	return c;
1989 err:
1990 	bch_cache_set_unregister(c);
1991 	return NULL;
1992 }
1993 
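/*
 * Bring a cache set online. If the superblock is marked CACHE_SYNC, the
 * existing metadata is reused: the journal is read, bucket priorities and
 * the btree root are loaded, the btree is checked and the journal is
 * replayed. Otherwise the cache is treated as new: existing data is
 * invalidated and fresh journal buckets, a new UUID bucket and an empty
 * btree root are written out. Finally the gc thread is started and any
 * waiting backing devices are attached.
 */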
1994 static int run_cache_set(struct cache_set *c)
1995 {
1996 	const char *err = "cannot allocate memory";
1997 	struct cached_dev *dc, *t;
1998 	struct cache *ca = c->cache;
1999 	struct closure cl;
2000 	LIST_HEAD(journal);
2001 	struct journal_replay *l;
2002 
2003 	closure_init_stack(&cl);
2004 
2005 	c->nbuckets = ca->sb.nbuckets;
2006 	set_gc_sectors(c);
2007 
2008 	if (CACHE_SYNC(&c->cache->sb)) {
2009 		struct bkey *k;
2010 		struct jset *j;
2011 
2012 		err = "cannot allocate memory for journal";
2013 		if (bch_journal_read(c, &journal))
2014 			goto err;
2015 
2016 		pr_debug("btree_journal_read() done\n");
2017 
2018 		err = "no journal entries found";
2019 		if (list_empty(&journal))
2020 			goto err;
2021 
2022 		j = &list_entry(journal.prev, struct journal_replay, list)->j;
2023 
2024 		err = "IO error reading priorities";
2025 		if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
2026 			goto err;
2027 
2028 		/*
2029 		 * If prio_read() fails it'll call cache_set_error and we'll
2030 		 * tear everything down right away, but if we perhaps checked
2031 		 * sooner we could avoid journal replay.
2032 		 */
2033 
2034 		k = &j->btree_root;
2035 
2036 		err = "bad btree root";
2037 		if (__bch_btree_ptr_invalid(c, k))
2038 			goto err;
2039 
2040 		err = "error reading btree root";
2041 		c->root = bch_btree_node_get(c, NULL, k,
2042 					     j->btree_level,
2043 					     true, NULL);
2044 		if (IS_ERR_OR_NULL(c->root))
2045 			goto err;
2046 
2047 		list_del_init(&c->root->list);
2048 		rw_unlock(true, c->root);
2049 
2050 		err = uuid_read(c, j, &cl);
2051 		if (err)
2052 			goto err;
2053 
2054 		err = "error in recovery";
2055 		if (bch_btree_check(c))
2056 			goto err;
2057 
2058 		bch_journal_mark(c, &journal);
2059 		bch_initial_gc_finish(c);
2060 		pr_debug("btree_check() done\n");
2061 
2062 		/*
2063 		 * bcache_journal_next() can't happen sooner, or
2064 		 * btree_gc_finish() will give spurious errors about last_gc >
2065 		 * gc_gen - this is a hack but oh well.
2066 		 */
2067 		bch_journal_next(&c->journal);
2068 
2069 		err = "error starting allocator thread";
2070 		if (bch_cache_allocator_start(ca))
2071 			goto err;
2072 
2073 		/*
2074 		 * First place it's safe to allocate: btree_check() and
2075 		 * btree_gc_finish() have to run before we have buckets to
2076 		 * allocate, and bch_bucket_alloc_set() might cause a journal
2077 		 * entry to be written so bcache_journal_next() has to be called
2078 		 * first.
2079 		 *
2080 		 * If the uuids were in the old format we have to rewrite them
2081 		 * before the next journal entry is written:
2082 		 */
2083 		if (j->version < BCACHE_JSET_VERSION_UUID)
2084 			__uuid_write(c);
2085 
2086 		err = "bcache: replay journal failed";
2087 		if (bch_journal_replay(c, &journal))
2088 			goto err;
2089 	} else {
2090 		unsigned int j;
2091 
2092 		pr_notice("invalidating existing data\n");
2093 		ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
2094 					2, SB_JOURNAL_BUCKETS);
2095 
2096 		for (j = 0; j < ca->sb.keys; j++)
2097 			ca->sb.d[j] = ca->sb.first_bucket + j;
2098 
2099 		bch_initial_gc_finish(c);
2100 
2101 		err = "error starting allocator thread";
2102 		if (bch_cache_allocator_start(ca))
2103 			goto err;
2104 
2105 		mutex_lock(&c->bucket_lock);
2106 		bch_prio_write(ca, true);
2107 		mutex_unlock(&c->bucket_lock);
2108 
2109 		err = "cannot allocate new UUID bucket";
2110 		if (__uuid_write(c))
2111 			goto err;
2112 
2113 		err = "cannot allocate new btree root";
2114 		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
2115 		if (IS_ERR_OR_NULL(c->root))
2116 			goto err;
2117 
2118 		mutex_lock(&c->root->write_lock);
2119 		bkey_copy_key(&c->root->key, &MAX_KEY);
2120 		bch_btree_node_write(c->root, &cl);
2121 		mutex_unlock(&c->root->write_lock);
2122 
2123 		bch_btree_set_root(c->root);
2124 		rw_unlock(true, c->root);
2125 
2126 		/*
2127 		 * We don't want to write the first journal entry until
2128 		 * everything is set up - fortunately journal entries won't be
2129 		 * written until the SET_CACHE_SYNC() here:
2130 		 */
2131 		SET_CACHE_SYNC(&c->cache->sb, true);
2132 
2133 		bch_journal_next(&c->journal);
2134 		bch_journal_meta(c, &cl);
2135 	}
2136 
2137 	err = "error starting gc thread";
2138 	if (bch_gc_thread_start(c))
2139 		goto err;
2140 
2141 	closure_sync(&cl);
2142 	c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
2143 	bcache_write_super(c);
2144 
2145 	if (bch_has_feature_obso_large_bucket(&c->cache->sb))
2146 		pr_err("Detected obsoleted large bucket layout, all attached bcache devices will be read-only\n");
2147 
2148 	list_for_each_entry_safe(dc, t, &uncached_devices, list)
2149 		bch_cached_dev_attach(dc, c, NULL);
2150 
2151 	flash_devs_run(c);
2152 
2153 	set_bit(CACHE_SET_RUNNING, &c->flags);
2154 	return 0;
2155 err:
2156 	while (!list_empty(&journal)) {
2157 		l = list_first_entry(&journal, struct journal_replay, list);
2158 		list_del(&l->list);
2159 		kfree(l);
2160 	}
2161 
2162 	closure_sync(&cl);
2163 
2164 	bch_cache_set_error(c, "%s", err);
2165 
2166 	return -EIO;
2167 }
2168 
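/*
 * Attach a cache device to its cache set: reuse an existing set with a
 * matching set UUID or allocate a new one, create the sysfs links between
 * cache and set, and try to run the set. Returns NULL on success or an
 * error string on failure.
 */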
2169 static const char *register_cache_set(struct cache *ca)
2170 {
2171 	char buf[12];
2172 	const char *err = "cannot allocate memory";
2173 	struct cache_set *c;
2174 
2175 	list_for_each_entry(c, &bch_cache_sets, list)
2176 		if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
2177 			if (c->cache)
2178 				return "duplicate cache set member";
2179 
2180 			goto found;
2181 		}
2182 
2183 	c = bch_cache_set_alloc(&ca->sb);
2184 	if (!c)
2185 		return err;
2186 
2187 	err = "error creating kobject";
2188 	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
2189 	    kobject_add(&c->internal, &c->kobj, "internal"))
2190 		goto err;
2191 
2192 	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
2193 		goto err;
2194 
2195 	bch_debug_init_cache_set(c);
2196 
2197 	list_add(&c->list, &bch_cache_sets);
2198 found:
2199 	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
2200 	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
2201 	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
2202 		goto err;
2203 
2204 	kobject_get(&ca->kobj);
2205 	ca->set = c;
2206 	ca->set->cache = ca;
2207 
2208 	err = "failed to run cache set";
2209 	if (run_cache_set(c) < 0)
2210 		goto err;
2211 
2212 	return NULL;
2213 err:
2214 	bch_cache_set_unregister(c);
2215 	return err;
2216 }
2217 
2218 /* Cache device */
2219 
2220 /* When ca->kobj released */
2221 void bch_cache_release(struct kobject *kobj)
2222 {
2223 	struct cache *ca = container_of(kobj, struct cache, kobj);
2224 	unsigned int i;
2225 
2226 	if (ca->set) {
2227 		BUG_ON(ca->set->cache != ca);
2228 		ca->set->cache = NULL;
2229 	}
2230 
2231 	free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
2232 	kfree(ca->prio_buckets);
2233 	vfree(ca->buckets);
2234 
2235 	free_heap(&ca->heap);
2236 	free_fifo(&ca->free_inc);
2237 
2238 	for (i = 0; i < RESERVE_NR; i++)
2239 		free_fifo(&ca->free[i]);
2240 
2241 	if (ca->sb_disk)
2242 		put_page(virt_to_page(ca->sb_disk));
2243 
2244 	if (!IS_ERR_OR_NULL(ca->bdev))
2245 		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2246 
2247 	kfree(ca);
2248 	module_put(THIS_MODULE);
2249 }
2250 
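/*
 * Allocate the per-cache in-memory structures: the reserve and free_inc
 * fifos, the bucket heap, the bucket array and the prio/disk bucket
 * buffers. All sizes are derived from ca->sb.nbuckets.
 */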
2251 static int cache_alloc(struct cache *ca)
2252 {
2253 	size_t free;
2254 	size_t btree_buckets;
2255 	struct bucket *b;
2256 	int ret = -ENOMEM;
2257 	const char *err = NULL;
2258 
2259 	__module_get(THIS_MODULE);
2260 	kobject_init(&ca->kobj, &bch_cache_ktype);
2261 
2262 	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
2263 
2264 	/*
2265 	 * When ca->sb.njournal_buckets is not zero, a journal exists, and
2266 	 * btree nodes may split while the journal is replayed in
2267 	 * bch_journal_replay(), so buckets of the RESERVE_BTREE type are
2268 	 * needed. In the worst case all journal buckets contain valid
2269 	 * journal entries and every key needs to be replayed, so there
2270 	 * should be as many RESERVE_BTREE type buckets as there are
2271 	 * journal buckets.
2272 	 */
2273 	btree_buckets = ca->sb.njournal_buckets ?: 8;
2274 	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
2275 	if (!free) {
2276 		ret = -EPERM;
2277 		err = "ca->sb.nbuckets is too small";
2278 		goto err_free;
2279 	}
2280 
2281 	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
2282 						GFP_KERNEL)) {
2283 		err = "ca->free[RESERVE_BTREE] alloc failed";
2284 		goto err_btree_alloc;
2285 	}
2286 
2287 	if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
2288 							GFP_KERNEL)) {
2289 		err = "ca->free[RESERVE_PRIO] alloc failed";
2290 		goto err_prio_alloc;
2291 	}
2292 
2293 	if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
2294 		err = "ca->free[RESERVE_MOVINGGC] alloc failed";
2295 		goto err_movinggc_alloc;
2296 	}
2297 
2298 	if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
2299 		err = "ca->free[RESERVE_NONE] alloc failed";
2300 		goto err_none_alloc;
2301 	}
2302 
2303 	if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
2304 		err = "ca->free_inc alloc failed";
2305 		goto err_free_inc_alloc;
2306 	}
2307 
2308 	if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
2309 		err = "ca->heap alloc failed";
2310 		goto err_heap_alloc;
2311 	}
2312 
2313 	ca->buckets = vzalloc(array_size(sizeof(struct bucket),
2314 			      ca->sb.nbuckets));
2315 	if (!ca->buckets) {
2316 		err = "ca->buckets alloc failed";
2317 		goto err_buckets_alloc;
2318 	}
2319 
2320 	ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
2321 				   prio_buckets(ca), 2),
2322 				   GFP_KERNEL);
2323 	if (!ca->prio_buckets) {
2324 		err = "ca->prio_buckets alloc failed";
2325 		goto err_prio_buckets_alloc;
2326 	}
2327 
2328 	ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb);
2329 	if (!ca->disk_buckets) {
2330 		err = "ca->disk_buckets alloc failed";
2331 		goto err_disk_buckets_alloc;
2332 	}
2333 
2334 	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
2335 
2336 	for_each_bucket(b, ca)
2337 		atomic_set(&b->pin, 0);
2338 	return 0;
2339 
2340 err_disk_buckets_alloc:
2341 	kfree(ca->prio_buckets);
2342 err_prio_buckets_alloc:
2343 	vfree(ca->buckets);
2344 err_buckets_alloc:
2345 	free_heap(&ca->heap);
2346 err_heap_alloc:
2347 	free_fifo(&ca->free_inc);
2348 err_free_inc_alloc:
2349 	free_fifo(&ca->free[RESERVE_NONE]);
2350 err_none_alloc:
2351 	free_fifo(&ca->free[RESERVE_MOVINGGC]);
2352 err_movinggc_alloc:
2353 	free_fifo(&ca->free[RESERVE_PRIO]);
2354 err_prio_alloc:
2355 	free_fifo(&ca->free[RESERVE_BTREE]);
2356 err_btree_alloc:
2357 err_free:
2358 	module_put(THIS_MODULE);
2359 	if (err)
2360 		pr_notice("error %s: %s\n", ca->cache_dev_name, err);
2361 	return ret;
2362 }
2363 
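/*
 * Final step of registering a cache device: copy the superblock, take
 * ownership of the block device, allocate the in-memory cache structures
 * and join (or create) a cache set. On failure the bdev reference is
 * dropped either here or later via bch_cache_release().
 */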
2364 static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
2365 				struct block_device *bdev, struct cache *ca)
2366 {
2367 	const char *err = NULL; /* must be set for any error case */
2368 	int ret = 0;
2369 
2370 	bdevname(bdev, ca->cache_dev_name);
2371 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
2372 	ca->bdev = bdev;
2373 	ca->bdev->bd_holder = ca;
2374 	ca->sb_disk = sb_disk;
2375 
2376 	if (blk_queue_discard(bdev_get_queue(bdev)))
2377 		ca->discard = CACHE_DISCARD(&ca->sb);
2378 
2379 	ret = cache_alloc(ca);
2380 	if (ret != 0) {
2381 		/*
2382 		 * If we failed here, it means ca->kobj is not initialized yet,
2383 		 * kobject_put() won't be called and there is no chance to
2384 		 * call blkdev_put() to bdev in bch_cache_release(). So we
2385 		 * explicitly call blkdev_put() here.
2386 		 */
2387 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2388 		if (ret == -ENOMEM)
2389 			err = "cache_alloc(): -ENOMEM";
2390 		else if (ret == -EPERM)
2391 			err = "cache_alloc(): cache device is too small";
2392 		else
2393 			err = "cache_alloc(): unknown error";
2394 		goto err;
2395 	}
2396 
2397 	if (kobject_add(&ca->kobj,
2398 			&part_to_dev(bdev->bd_part)->kobj,
2399 			"bcache")) {
2400 		err = "error calling kobject_add";
2401 		ret = -ENOMEM;
2402 		goto out;
2403 	}
2404 
2405 	mutex_lock(&bch_register_lock);
2406 	err = register_cache_set(ca);
2407 	mutex_unlock(&bch_register_lock);
2408 
2409 	if (err) {
2410 		ret = -ENODEV;
2411 		goto out;
2412 	}
2413 
2414 	pr_info("registered cache device %s\n", ca->cache_dev_name);
2415 
2416 out:
2417 	kobject_put(&ca->kobj);
2418 
2419 err:
2420 	if (err)
2421 		pr_notice("error %s: %s\n", ca->cache_dev_name, err);
2422 
2423 	return ret;
2424 }
2425 
2426 /* Global interfaces/init */
2427 
2428 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2429 			       const char *buffer, size_t size);
2430 static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
2431 					 struct kobj_attribute *attr,
2432 					 const char *buffer, size_t size);
2433 
2434 kobj_attribute_write(register,		register_bcache);
2435 kobj_attribute_write(register_quiet,	register_bcache);
2436 kobj_attribute_write(pendings_cleanup,	bch_pending_bdevs_cleanup);
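
/*
 * These attributes appear under /sys/fs/bcache/ once the module is loaded.
 * A device is registered by writing its path to the register file, for
 * example (illustrative path):
 *   echo /dev/sdb > /sys/fs/bcache/register
 * register_quiet behaves the same but returns silently instead of logging
 * an error when the device is busy or already registered.
 */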
2437 
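/*
 * The bch_is_open_*() helpers walk the global cache set and uncached
 * device lists; callers are expected to hold bch_register_lock, as
 * register_bcache() does when it probes a busy device.
 */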
2438 static bool bch_is_open_backing(struct block_device *bdev)
2439 {
2440 	struct cache_set *c, *tc;
2441 	struct cached_dev *dc, *t;
2442 
2443 	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2444 		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
2445 			if (dc->bdev == bdev)
2446 				return true;
2447 	list_for_each_entry_safe(dc, t, &uncached_devices, list)
2448 		if (dc->bdev == bdev)
2449 			return true;
2450 	return false;
2451 }
2452 
2453 static bool bch_is_open_cache(struct block_device *bdev)
2454 {
2455 	struct cache_set *c, *tc;
2456 
2457 	list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
2458 		struct cache *ca = c->cache;
2459 
2460 		if (ca->bdev == bdev)
2461 			return true;
2462 	}
2463 
2464 	return false;
2465 }
2466 
2467 static bool bch_is_open(struct block_device *bdev)
2468 {
2469 	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
2470 }
2471 
2472 struct async_reg_args {
2473 	struct delayed_work reg_work;
2474 	char *path;
2475 	struct cache_sb *sb;
2476 	struct cache_sb_disk *sb_disk;
2477 	struct block_device *bdev;
2478 };
2479 
2480 static void register_bdev_worker(struct work_struct *work)
2481 {
2482 	int fail = false;
2483 	struct async_reg_args *args =
2484 		container_of(work, struct async_reg_args, reg_work.work);
2485 	struct cached_dev *dc;
2486 
2487 	dc = kzalloc(sizeof(*dc), GFP_KERNEL);
2488 	if (!dc) {
2489 		fail = true;
2490 		put_page(virt_to_page(args->sb_disk));
2491 		blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2492 		goto out;
2493 	}
2494 
2495 	mutex_lock(&bch_register_lock);
2496 	if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0)
2497 		fail = true;
2498 	mutex_unlock(&bch_register_lock);
2499 
2500 out:
2501 	if (fail)
2502 		pr_info("error %s: failed to register backing device\n",
2503 			args->path);
2504 	kfree(args->sb);
2505 	kfree(args->path);
2506 	kfree(args);
2507 	module_put(THIS_MODULE);
2508 }
2509 
2510 static void register_cache_worker(struct work_struct *work)
2511 {
2512 	int fail = false;
2513 	struct async_reg_args *args =
2514 		container_of(work, struct async_reg_args, reg_work.work);
2515 	struct cache *ca;
2516 
2517 	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2518 	if (!ca) {
2519 		fail = true;
2520 		put_page(virt_to_page(args->sb_disk));
2521 		blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2522 		goto out;
2523 	}
2524 
2525 	/* blkdev_put() will be called in bch_cache_release() */
2526 	if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0)
2527 		fail = true;
2528 
2529 out:
2530 	if (fail)
2531 		pr_info("error %s: failed to register cache device\n",
2532 			args->path);
2533 	kfree(args->sb);
2534 	kfree(args->path);
2535 	kfree(args);
2536 	module_put(THIS_MODULE);
2537 }
2538 
2539 static void register_device_async(struct async_reg_args *args)
2540 {
2541 	if (SB_IS_BDEV(args->sb))
2542 		INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
2543 	else
2544 		INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);
2545 
2546 	/* 10 jiffies is enough for a delay */
2547 	queue_delayed_work(system_wq, &args->reg_work, 10);
2548 }
2549 
2550 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2551 			       const char *buffer, size_t size)
2552 {
2553 	const char *err;
2554 	char *path = NULL;
2555 	struct cache_sb *sb;
2556 	struct cache_sb_disk *sb_disk;
2557 	struct block_device *bdev;
2558 	ssize_t ret;
2559 	bool async_registration = false;
2560 
2561 #ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
2562 	async_registration = true;
2563 #endif
2564 
2565 	ret = -EBUSY;
2566 	err = "failed to reference bcache module";
2567 	if (!try_module_get(THIS_MODULE))
2568 		goto out;
2569 
2570 	/* For latest state of bcache_is_reboot */
2571 	smp_mb();
2572 	err = "bcache is in reboot";
2573 	if (bcache_is_reboot)
2574 		goto out_module_put;
2575 
2576 	ret = -ENOMEM;
2577 	err = "cannot allocate memory";
2578 	path = kstrndup(buffer, size, GFP_KERNEL);
2579 	if (!path)
2580 		goto out_module_put;
2581 
2582 	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
2583 	if (!sb)
2584 		goto out_free_path;
2585 
2586 	ret = -EINVAL;
2587 	err = "failed to open device";
2588 	bdev = blkdev_get_by_path(strim(path),
2589 				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2590 				  sb);
2591 	if (IS_ERR(bdev)) {
2592 		if (bdev == ERR_PTR(-EBUSY)) {
2593 			bdev = lookup_bdev(strim(path));
2594 			mutex_lock(&bch_register_lock);
2595 			if (!IS_ERR(bdev) && bch_is_open(bdev))
2596 				err = "device already registered";
2597 			else
2598 				err = "device busy";
2599 			mutex_unlock(&bch_register_lock);
2600 			if (!IS_ERR(bdev))
2601 				bdput(bdev);
2602 			if (attr == &ksysfs_register_quiet)
2603 				goto done;
2604 		}
2605 		goto out_free_sb;
2606 	}
2607 
2608 	err = "failed to set blocksize";
2609 	if (set_blocksize(bdev, 4096))
2610 		goto out_blkdev_put;
2611 
2612 	err = read_super(sb, bdev, &sb_disk);
2613 	if (err)
2614 		goto out_blkdev_put;
2615 
2616 	err = "failed to register device";
2617 
2618 	if (async_registration) {
2619 		/* register in asynchronous way */
2620 		struct async_reg_args *args =
2621 			kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
2622 
2623 		if (!args) {
2624 			ret = -ENOMEM;
2625 			err = "cannot allocate memory";
2626 			goto out_put_sb_page;
2627 		}
2628 
2629 		args->path	= path;
2630 		args->sb	= sb;
2631 		args->sb_disk	= sb_disk;
2632 		args->bdev	= bdev;
2633 		register_device_async(args);
2634 		/* No wait and returns to user space */
2635 		goto async_done;
2636 	}
2637 
2638 	if (SB_IS_BDEV(sb)) {
2639 		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
2640 
2641 		if (!dc)
2642 			goto out_put_sb_page;
2643 
2644 		mutex_lock(&bch_register_lock);
2645 		ret = register_bdev(sb, sb_disk, bdev, dc);
2646 		mutex_unlock(&bch_register_lock);
2647 		/* blkdev_put() will be called in cached_dev_free() */
2648 		if (ret < 0)
2649 			goto out_free_sb;
2650 	} else {
2651 		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2652 
2653 		if (!ca)
2654 			goto out_put_sb_page;
2655 
2656 		/* blkdev_put() will be called in bch_cache_release() */
2657 		if (register_cache(sb, sb_disk, bdev, ca) != 0)
2658 			goto out_free_sb;
2659 	}
2660 
2661 done:
2662 	kfree(sb);
2663 	kfree(path);
2664 	module_put(THIS_MODULE);
2665 async_done:
2666 	return size;
2667 
2668 out_put_sb_page:
2669 	put_page(virt_to_page(sb_disk));
2670 out_blkdev_put:
2671 	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2672 out_free_sb:
2673 	kfree(sb);
2674 out_free_path:
2675 	kfree(path);
2676 	path = NULL;
2677 out_module_put:
2678 	module_put(THIS_MODULE);
2679 out:
2680 	pr_info("error %s: %s\n", path?path:"", err);
2681 	return ret;
2682 }
2683 
2684 
2685 struct pdev {
2686 	struct list_head list;
2687 	struct cached_dev *dc;
2688 };
2689 
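/*
 * Stop "pending" backing devices: devices that were registered but whose
 * cache set has not shown up, so they still sit on uncached_devices.
 * Triggered from user space by writing anything to the sysfs file, for
 * example:
 *   echo 1 > /sys/fs/bcache/pendings_cleanup
 */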
2690 static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
2691 					 struct kobj_attribute *attr,
2692 					 const char *buffer,
2693 					 size_t size)
2694 {
2695 	LIST_HEAD(pending_devs);
2696 	ssize_t ret = size;
2697 	struct cached_dev *dc, *tdc;
2698 	struct pdev *pdev, *tpdev;
2699 	struct cache_set *c, *tc;
2700 
2701 	mutex_lock(&bch_register_lock);
2702 	list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
2703 		pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
2704 		if (!pdev)
2705 			break;
2706 		pdev->dc = dc;
2707 		list_add(&pdev->list, &pending_devs);
2708 	}
2709 
2710 	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
2711 		list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
2712 			char *pdev_set_uuid = pdev->dc->sb.set_uuid;
2713 			char *set_uuid = c->set_uuid;
2714 
2715 			if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
2716 				list_del(&pdev->list);
2717 				kfree(pdev);
2718 				break;
2719 			}
2720 		}
2721 	}
2722 	mutex_unlock(&bch_register_lock);
2723 
2724 	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
2725 		pr_info("delete pdev %p\n", pdev);
2726 		list_del(&pdev->list);
2727 		bcache_device_stop(&pdev->dc->disk);
2728 		kfree(pdev);
2729 	}
2730 
2731 	return ret;
2732 }
2733 
2734 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
2735 {
2736 	if (bcache_is_reboot)
2737 		return NOTIFY_DONE;
2738 
2739 	if (code == SYS_DOWN ||
2740 	    code == SYS_HALT ||
2741 	    code == SYS_POWER_OFF) {
2742 		DEFINE_WAIT(wait);
2743 		unsigned long start = jiffies;
2744 		bool stopped = false;
2745 
2746 		struct cache_set *c, *tc;
2747 		struct cached_dev *dc, *tdc;
2748 
2749 		mutex_lock(&bch_register_lock);
2750 
2751 		if (bcache_is_reboot)
2752 			goto out;
2753 
2754 		/* New registrations are rejected from now on */
2755 		bcache_is_reboot = true;
2756 		/*
2757 		 * Make a registering caller (if any) on another CPU
2758 		 * core see that bcache_is_reboot is already true
2759 		 */
2760 		smp_mb();
2761 
2762 		if (list_empty(&bch_cache_sets) &&
2763 		    list_empty(&uncached_devices))
2764 			goto out;
2765 
2766 		mutex_unlock(&bch_register_lock);
2767 
2768 		pr_info("Stopping all devices:\n");
2769 
2770 		/*
2771 		 * The reason bch_register_lock is not held while calling
2772 		 * bch_cache_set_stop() and bcache_device_stop() is to
2773 		 * avoid a potential deadlock during reboot, because the
2774 		 * cache set and bcache device stopping paths acquire
2775 		 * bch_register_lock too.
2776 		 *
2777 		 * We are safe here because bcache_is_reboot is already
2778 		 * set to true, so register_bcache() will reject new
2779 		 * registrations. bcache_is_reboot also makes sure that
2780 		 * bcache_reboot() won't be re-entered by another thread,
2781 		 * so there is no race in the following list iteration
2782 		 * with list_for_each_entry_safe().
2783 		 */
2784 		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2785 			bch_cache_set_stop(c);
2786 
2787 		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
2788 			bcache_device_stop(&dc->disk);
2789 
2790 
2791 		/*
2792 		 * Give an early chance for other kthreads and
2793 		 * kworkers to stop themselves
2794 		 */
2795 		schedule();
2796 
2797 		/* What's a condition variable? */
2798 		while (1) {
2799 			long timeout = start + 10 * HZ - jiffies;
2800 
2801 			mutex_lock(&bch_register_lock);
2802 			stopped = list_empty(&bch_cache_sets) &&
2803 				list_empty(&uncached_devices);
2804 
2805 			if (timeout < 0 || stopped)
2806 				break;
2807 
2808 			prepare_to_wait(&unregister_wait, &wait,
2809 					TASK_UNINTERRUPTIBLE);
2810 
2811 			mutex_unlock(&bch_register_lock);
2812 			schedule_timeout(timeout);
2813 		}
2814 
2815 		finish_wait(&unregister_wait, &wait);
2816 
2817 		if (stopped)
2818 			pr_info("All devices stopped\n");
2819 		else
2820 			pr_notice("Timeout waiting for devices to be closed\n");
2821 out:
2822 		mutex_unlock(&bch_register_lock);
2823 	}
2824 
2825 	return NOTIFY_DONE;
2826 }
2827 
2828 static struct notifier_block reboot = {
2829 	.notifier_call	= bcache_reboot,
2830 	.priority	= INT_MAX, /* before any real devices */
2831 };
2832 
2833 static void bcache_exit(void)
2834 {
2835 	bch_debug_exit();
2836 	bch_request_exit();
2837 	if (bcache_kobj)
2838 		kobject_put(bcache_kobj);
2839 	if (bcache_wq)
2840 		destroy_workqueue(bcache_wq);
2841 	if (bch_journal_wq)
2842 		destroy_workqueue(bch_journal_wq);
2843 	if (bch_flush_wq)
2844 		destroy_workqueue(bch_flush_wq);
2845 	bch_btree_exit();
2846 
2847 	if (bcache_major)
2848 		unregister_blkdev(bcache_major, "bcache");
2849 	unregister_reboot_notifier(&reboot);
2850 	mutex_destroy(&bch_register_lock);
2851 }
2852 
2853 /* Check and fixup module parameters */
2854 static void check_module_parameters(void)
2855 {
2856 	if (bch_cutoff_writeback_sync == 0)
2857 		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
2858 	else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
2859 		pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
2860 			bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
2861 		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
2862 	}
2863 
2864 	if (bch_cutoff_writeback == 0)
2865 		bch_cutoff_writeback = CUTOFF_WRITEBACK;
2866 	else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
2867 		pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
2868 			bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
2869 		bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
2870 	}
2871 
2872 	if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
2873 		pr_warn("set bch_cutoff_writeback (%u) to %u\n",
2874 			bch_cutoff_writeback, bch_cutoff_writeback_sync);
2875 		bch_cutoff_writeback = bch_cutoff_writeback_sync;
2876 	}
2877 }
2878 
2879 static int __init bcache_init(void)
2880 {
2881 	static const struct attribute *files[] = {
2882 		&ksysfs_register.attr,
2883 		&ksysfs_register_quiet.attr,
2884 		&ksysfs_pendings_cleanup.attr,
2885 		NULL
2886 	};
2887 
2888 	check_module_parameters();
2889 
2890 	mutex_init(&bch_register_lock);
2891 	init_waitqueue_head(&unregister_wait);
2892 	register_reboot_notifier(&reboot);
2893 
2894 	bcache_major = register_blkdev(0, "bcache");
2895 	if (bcache_major < 0) {
2896 		unregister_reboot_notifier(&reboot);
2897 		mutex_destroy(&bch_register_lock);
2898 		return bcache_major;
2899 	}
2900 
2901 	if (bch_btree_init())
2902 		goto err;
2903 
2904 	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
2905 	if (!bcache_wq)
2906 		goto err;
2907 
2908 	/*
2909 	 * Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
2910 	 *
2911 	 * 1. It used `system_wq` before which also does no memory reclaim.
2912 	 * 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and
2913 	 *    reduced throughput can be observed.
2914 	 *
2915 	 * We still want to use our own queue to not congest the `system_wq`.
2916 	 */
2917 	bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
2918 	if (!bch_flush_wq)
2919 		goto err;
2920 
2921 	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
2922 	if (!bch_journal_wq)
2923 		goto err;
2924 
2925 	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
2926 	if (!bcache_kobj)
2927 		goto err;
2928 
2929 	if (bch_request_init() ||
2930 	    sysfs_create_files(bcache_kobj, files))
2931 		goto err;
2932 
2933 	bch_debug_init();
2934 	closure_debug_init();
2935 
2936 	bcache_is_reboot = false;
2937 
2938 	return 0;
2939 err:
2940 	bcache_exit();
2941 	return -ENOMEM;
2942 }
2943 
2944 /*
2945  * Module hooks
2946  */
2947 module_exit(bcache_exit);
2948 module_init(bcache_init);
2949 
2950 module_param(bch_cutoff_writeback, uint, 0);
2951 MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");
2952 
2953 module_param(bch_cutoff_writeback_sync, uint, 0);
2954 MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");
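
/*
 * Both cutoffs may be set when the module is loaded and are validated by
 * check_module_parameters(); a value of 0 selects the built-in default.
 * For example (illustrative values):
 *   modprobe bcache bch_cutoff_writeback=40 bch_cutoff_writeback_sync=70
 * Values above CUTOFF_WRITEBACK_MAX / CUTOFF_WRITEBACK_SYNC_MAX are clamped,
 * and bch_cutoff_writeback is never allowed to exceed
 * bch_cutoff_writeback_sync.
 */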
2955 
2956 MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
2957 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
2958 MODULE_LICENSE("GPL");
2959