Lines matching refs:k
32 i->k = bkey_next(i->k); in sort_key_next()
34 if (i->k == i->end) in sort_key_next()
41 int64_t c = bkey_cmp(l.k, r.k); in bch_key_sort_cmp()
43 return c ? c > 0 : l.k < r.k; in bch_key_sort_cmp()
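
The bch_key_sort_cmp() fragments above order keys for the merge-sort heap: a non-zero bkey_cmp() result decides, and exactly equal keys fall back to comparing the iterator pointers themselves so the ordering stays total and deterministic. A minimal standalone sketch of that comparator shape, using invented names (sort_entry, sort_after) rather than the kernel's bkey machinery:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for one heap entry; not the kernel's type. */
    struct sort_entry {
            uint64_t key;       /* simplified key value            */
            const void *addr;   /* where this key lives in memory  */
    };

    /*
     * Same shape as "return c ? c > 0 : l.k < r.k": the key comparison wins
     * when it is decisive, otherwise the address breaks the tie so two
     * distinct entries are never reported as unordered.
     */
    static bool sort_after(struct sort_entry l, struct sort_entry r)
    {
            int64_t c = (l.key > r.key) - (l.key < r.key);

            return c ? c > 0 : (uintptr_t)l.addr < (uintptr_t)r.addr;
    }
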
46 static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) in __ptr_invalid() argument
50 for (i = 0; i < KEY_PTRS(k); i++) in __ptr_invalid()
51 if (ptr_available(c, k, i)) { in __ptr_invalid()
52 struct cache *ca = PTR_CACHE(c, k, i); in __ptr_invalid()
53 size_t bucket = PTR_BUCKET_NR(c, k, i); in __ptr_invalid()
54 size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); in __ptr_invalid()
56 if (KEY_SIZE(k) + r > c->sb.bucket_size || in __ptr_invalid()
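
The size check above is the core of __ptr_invalid(): the key's length plus the pointer's offset within its bucket must not exceed the bucket size, so an extent can never spill across a bucket boundary (the surrounding lines also bound the bucket number itself). A small sketch of just that arithmetic, with plain integers and an invented helper name standing in for the sb/bucket macros:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Hypothetical simplification of the boundary check in __ptr_invalid():
     * "r" is how far into its bucket the pointed-to data starts, so the data
     * runs past the end of the bucket exactly when size + r > bucket_size.
     * bucket_size_sectors must be non-zero.
     */
    static bool extent_crosses_bucket(uint64_t key_size_sectors,
                                      uint64_t offset_sectors,
                                      uint64_t bucket_size_sectors)
    {
            uint64_t r = offset_sectors % bucket_size_sectors; /* bucket_remainder() analogue */

            return key_size_sectors + r > bucket_size_sectors;
    }
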
67 static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) in bch_ptr_status() argument
71 for (i = 0; i < KEY_PTRS(k); i++) in bch_ptr_status()
72 if (ptr_available(c, k, i)) { in bch_ptr_status()
73 struct cache *ca = PTR_CACHE(c, k, i); in bch_ptr_status()
74 size_t bucket = PTR_BUCKET_NR(c, k, i); in bch_ptr_status()
75 size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); in bch_ptr_status()
77 if (KEY_SIZE(k) + r > c->sb.bucket_size) in bch_ptr_status()
83 if (ptr_stale(c, k, i)) in bch_ptr_status()
87 if (!bkey_cmp(k, &ZERO_KEY)) in bch_ptr_status()
89 if (!KEY_PTRS(k)) in bch_ptr_status()
91 if (!KEY_SIZE(k)) in bch_ptr_status()
96 void bch_extent_to_text(char *buf, size_t size, const struct bkey *k) in bch_extent_to_text() argument
103 p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k)); in bch_extent_to_text()
105 for (i = 0; i < KEY_PTRS(k); i++) { in bch_extent_to_text()
109 if (PTR_DEV(k, i) == PTR_CHECK_DEV) in bch_extent_to_text()
112 p("%llu:%llu gen %llu", PTR_DEV(k, i), in bch_extent_to_text()
113 PTR_OFFSET(k, i), PTR_GEN(k, i)); in bch_extent_to_text()
118 if (KEY_DIRTY(k)) in bch_extent_to_text()
120 if (KEY_CSUM(k)) in bch_extent_to_text()
121 p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]); in bch_extent_to_text()
125 static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k) in bch_bkey_dump() argument
131 bch_extent_to_text(buf, sizeof(buf), k); in bch_bkey_dump()
134 for (j = 0; j < KEY_PTRS(k); j++) { in bch_bkey_dump()
135 size_t n = PTR_BUCKET_NR(b->c, k, j); in bch_bkey_dump()
140 PTR_BUCKET(b->c, k, j)->prio); in bch_bkey_dump()
143 printk(" %s\n", bch_ptr_status(b->c, k)); in bch_bkey_dump()
148 bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k) in __bch_btree_ptr_invalid() argument
152 if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)) in __bch_btree_ptr_invalid()
155 if (__ptr_invalid(c, k)) in __bch_btree_ptr_invalid()
160 bch_extent_to_text(buf, sizeof(buf), k); in __bch_btree_ptr_invalid()
161 cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k)); in __bch_btree_ptr_invalid()
165 static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k) in bch_btree_ptr_invalid() argument
168 return __bch_btree_ptr_invalid(b->c, k); in bch_btree_ptr_invalid()
171 static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) in btree_ptr_bad_expensive() argument
178 for (i = 0; i < KEY_PTRS(k); i++) in btree_ptr_bad_expensive()
179 if (ptr_available(b->c, k, i)) { in btree_ptr_bad_expensive()
180 g = PTR_BUCKET(b->c, k, i); in btree_ptr_bad_expensive()
182 if (KEY_DIRTY(k) || in btree_ptr_bad_expensive()
195 bch_extent_to_text(buf, sizeof(buf), k); in btree_ptr_bad_expensive()
198 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), in btree_ptr_bad_expensive()
203 static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k) in bch_btree_ptr_bad() argument
208 if (!bkey_cmp(k, &ZERO_KEY) || in bch_btree_ptr_bad()
209 !KEY_PTRS(k) || in bch_btree_ptr_bad()
210 bch_ptr_invalid(bk, k)) in bch_btree_ptr_bad()
213 for (i = 0; i < KEY_PTRS(k); i++) in bch_btree_ptr_bad()
214 if (!ptr_available(b->c, k, i) || in bch_btree_ptr_bad()
215 ptr_stale(b->c, k, i)) in bch_btree_ptr_bad()
219 btree_ptr_bad_expensive(b, k)) in bch_btree_ptr_bad()
259 int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k)); in bch_extent_sort_cmp()
261 return c ? c > 0 : l.k < r.k; in bch_extent_sort_cmp()
274 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0) in bch_extent_sort_fixup()
277 if (!KEY_SIZE(i->k)) { in bch_extent_sort_fixup()
283 if (top->k > i->k) { in bch_extent_sort_fixup()
284 if (bkey_cmp(top->k, i->k) >= 0) in bch_extent_sort_fixup()
287 bch_cut_front(top->k, i->k); in bch_extent_sort_fixup()
292 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k))); in bch_extent_sort_fixup()
294 if (bkey_cmp(i->k, top->k) < 0) { in bch_extent_sort_fixup()
295 bkey_copy(tmp, top->k); in bch_extent_sort_fixup()
297 bch_cut_back(&START_KEY(i->k), tmp); in bch_extent_sort_fixup()
298 bch_cut_front(i->k, top->k); in bch_extent_sort_fixup()
303 bch_cut_back(&START_KEY(i->k), top->k); in bch_extent_sort_fixup()
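
The cut calls above resolve overlaps while extents from several sets are being merge-sorted: the extent that takes precedence is left whole and the other is trimmed with bch_cut_front()/bch_cut_back(), which move an extent's start forward or pull its end back by shrinking KEY_SIZE; when the winning extent sits strictly inside the other, the loser is additionally split via the temporary key. A standalone sketch of the trimming arithmetic on half-open [start, end) ranges, with invented names and the split case left out:

    #include <stdint.h>

    /* Illustrative extent as a half-open sector range [start, end). */
    struct range {
            uint64_t start, end;
    };

    /* bch_cut_front() analogue: drop everything before "where". */
    static void cut_front(uint64_t where, struct range *r)
    {
            if (r->start < where)
                    r->start = where < r->end ? where : r->end;
    }

    /* bch_cut_back() analogue: drop everything from "where" onwards. */
    static void cut_back(uint64_t where, struct range *r)
    {
            if (r->end > where)
                    r->end = where > r->start ? where : r->start;
    }

    /*
     * Trim "older" so it no longer overlaps "newer" (which wins the overlap).
     * The case where "newer" lies strictly inside "older" would also need a
     * split into two pieces; that path is not shown here.
     */
    static void resolve_overlap(const struct range *newer, struct range *older)
    {
            if (older->start < newer->start)
                    cut_back(newer->start, older);   /* keep only the front part */
            else
                    cut_front(newer->end, older);    /* keep only the back part  */
    }
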
311 static void bch_subtract_dirty(struct bkey *k, in bch_subtract_dirty() argument
316 if (KEY_DIRTY(k)) in bch_subtract_dirty()
317 bcache_dev_sectors_dirty_add(c, KEY_INODE(k), in bch_subtract_dirty()
335 struct bkey *k = bch_btree_iter_next(iter); in bch_extent_insert_fixup() local
336 if (!k) in bch_extent_insert_fixup()
339 if (bkey_cmp(&START_KEY(k), insert) >= 0) { in bch_extent_insert_fixup()
340 if (KEY_SIZE(k)) in bch_extent_insert_fixup()
346 if (bkey_cmp(k, &START_KEY(insert)) <= 0) in bch_extent_insert_fixup()
349 old_offset = KEY_START(k); in bch_extent_insert_fixup()
350 old_size = KEY_SIZE(k); in bch_extent_insert_fixup()
360 if (replace_key && KEY_SIZE(k)) { in bch_extent_insert_fixup()
366 uint64_t offset = KEY_START(k) - in bch_extent_insert_fixup()
370 if (KEY_START(k) < KEY_START(replace_key) || in bch_extent_insert_fixup()
371 KEY_OFFSET(k) > KEY_OFFSET(replace_key)) in bch_extent_insert_fixup()
375 if (KEY_START(k) > KEY_START(insert) + sectors_found) in bch_extent_insert_fixup()
378 if (!bch_bkey_equal_header(k, replace_key)) in bch_extent_insert_fixup()
387 if (k->ptr[i] != replace_key->ptr[i] + offset) in bch_extent_insert_fixup()
390 sectors_found = KEY_OFFSET(k) - KEY_START(insert); in bch_extent_insert_fixup()
393 if (bkey_cmp(insert, k) < 0 && in bch_extent_insert_fixup()
394 bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) { in bch_extent_insert_fixup()
404 bch_subtract_dirty(k, c, KEY_START(insert), in bch_extent_insert_fixup()
407 if (bkey_written(b, k)) { in bch_extent_insert_fixup()
422 bch_bset_insert(b, top, k); in bch_extent_insert_fixup()
425 bkey_copy(&temp.key, k); in bch_extent_insert_fixup()
426 bch_bset_insert(b, k, &temp.key); in bch_extent_insert_fixup()
427 top = bkey_next(k); in bch_extent_insert_fixup()
431 bch_cut_back(&START_KEY(insert), k); in bch_extent_insert_fixup()
432 bch_bset_fix_invalidated_key(b, k); in bch_extent_insert_fixup()
436 if (bkey_cmp(insert, k) < 0) { in bch_extent_insert_fixup()
437 bch_cut_front(insert, k); in bch_extent_insert_fixup()
439 if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) in bch_extent_insert_fixup()
442 if (bkey_written(b, k) && in bch_extent_insert_fixup()
443 bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) { in bch_extent_insert_fixup()
448 bch_cut_front(k, k); in bch_extent_insert_fixup()
450 __bch_cut_back(&START_KEY(insert), k); in bch_extent_insert_fixup()
451 bch_bset_fix_invalidated_key(b, k); in bch_extent_insert_fixup()
455 bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k)); in bch_extent_insert_fixup()
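
The sequence above is the heart of bch_extent_insert_fixup(): an existing key that the new extent lands in the middle of is split in two (the temporary-key path), keys that merely overlap one end are trimmed with bch_cut_front()/bch_cut_back(), and dirty-sector counts are adjusted via bch_subtract_dirty(). A compact sketch of that case analysis on plain ranges, with invented names:

    #include <stdint.h>

    /* Illustrative extent as a half-open sector range [start, end). */
    struct ext {
            uint64_t start, end;
    };

    /*
     * Hypothetical sketch of the overlap cases bch_extent_insert_fixup()
     * walks through for each existing key "old" hit by the insert "ins":
     *
     *   1. ins strictly inside old  -> split old into two pieces
     *   2. ins overlaps old's tail  -> pull old's end back to ins->start
     *   3. ins overlaps old's head  -> move old's start up to ins->end
     *   4. old fully covered        -> old becomes empty
     *
     * Returns 1 if a second piece was produced in *tail, 0 otherwise.
     */
    static int fixup_existing(const struct ext *ins, struct ext *old, struct ext *tail)
    {
            if (ins->end <= old->start || ins->start >= old->end)
                    return 0;                        /* no overlap: nothing to do */

            if (old->start < ins->start && old->end > ins->end) {
                    /* case 1: keep [old->start, ins->start) and [ins->end, old->end) */
                    tail->start = ins->end;
                    tail->end   = old->end;
                    old->end    = ins->start;
                    return 1;
            }
            if (old->start < ins->start)             /* case 2: tail overlap */
                    old->end = ins->start;
            else if (old->end > ins->end)            /* case 3: head overlap */
                    old->start = ins->end;
            else                                     /* case 4: fully covered */
                    old->end = old->start;
            return 0;
    }
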
477 bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k) in __bch_extent_invalid() argument
481 if (!KEY_SIZE(k)) in __bch_extent_invalid()
484 if (KEY_SIZE(k) > KEY_OFFSET(k)) in __bch_extent_invalid()
487 if (__ptr_invalid(c, k)) in __bch_extent_invalid()
492 bch_extent_to_text(buf, sizeof(buf), k); in __bch_extent_invalid()
493 cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k)); in __bch_extent_invalid()
497 static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k) in bch_extent_invalid() argument
500 return __bch_extent_invalid(b->c, k); in bch_extent_invalid()
503 static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, in bch_extent_bad_expensive() argument
506 struct bucket *g = PTR_BUCKET(b->c, k, ptr); in bch_extent_bad_expensive()
513 (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k)))) in bch_extent_bad_expensive()
525 bch_extent_to_text(buf, sizeof(buf), k); in bch_extent_bad_expensive()
528 buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin), in bch_extent_bad_expensive()
533 static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k) in bch_extent_bad() argument
539 if (!KEY_PTRS(k) || in bch_extent_bad()
540 bch_extent_invalid(bk, k)) in bch_extent_bad()
543 for (i = 0; i < KEY_PTRS(k); i++) in bch_extent_bad()
544 if (!ptr_available(b->c, k, i)) in bch_extent_bad()
547 if (!expensive_debug_checks(b->c) && KEY_DIRTY(k)) in bch_extent_bad()
550 for (i = 0; i < KEY_PTRS(k); i++) { in bch_extent_bad()
551 g = PTR_BUCKET(b->c, k, i); in bch_extent_bad()
552 stale = ptr_stale(b->c, k, i); in bch_extent_bad()
558 btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k), in bch_extent_bad()
565 bch_extent_bad_expensive(b, k, i)) in bch_extent_bad()