Lines matching refs: k (cross-reference of the identifier k in bcache's bset.c)

21 	struct bkey *k, *next;  in bch_dump_bset()  local
23 for (k = i->start; k < bset_bkey_last(i); k = next) { in bch_dump_bset()
24 next = bkey_next(k); in bch_dump_bset()
27 (unsigned) ((u64 *) k - i->d), i->keys); in bch_dump_bset()
30 b->ops->key_dump(b, k); in bch_dump_bset()
32 printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k)); in bch_dump_bset()
35 bkey_cmp(k, b->ops->is_extents ? in bch_dump_bset()
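
Note: bch_dump_bset() walks a bset's variable-length keys. bkey_next() advances by the current key's size in u64s, so `next` is saved before the key is consumed, and the key's position is reported as a u64 offset into the set (line 27). A minimal sketch of that iteration pattern; the struct layout below is invented for illustration and is not bcache's real bkey:

    #include <stdio.h>
    #include <stdint.h>

    /* Invented stand-in for struct bkey: the first word records the
     * key's total size in 64-bit words, so keys are variable-length. */
    struct toy_bkey {
        uint64_t u64s;
        uint64_t inode;
        uint64_t offset;
    };

    static struct toy_bkey *toy_bkey_next(struct toy_bkey *k)
    {
        return (struct toy_bkey *)((uint64_t *)k + k->u64s);
    }

    int main(void)
    {
        /* three packed keys, each 3 u64s: {size, inode, offset} */
        uint64_t buf[] = { 3, 1, 100,  3, 1, 200,  3, 2, 50 };
        struct toy_bkey *end = (struct toy_bkey *)(buf + 9);
        struct toy_bkey *next;

        for (struct toy_bkey *k = (struct toy_bkey *)buf; k < end; k = next) {
            next = toy_bkey_next(k);   /* grab next before consuming k */
            printf("%llu:%llu\n", (unsigned long long)k->inode,
                                  (unsigned long long)k->offset);
        }
        return 0;
    }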
56 struct bkey *k; in __bch_count_data() local
59 for_each_key(b, k, &iter) in __bch_count_data()
60 ret += KEY_SIZE(k); in __bch_count_data()
67 struct bkey *k, *p = NULL; in __bch_check_keys() local
71 for_each_key(b, k, &iter) { in __bch_check_keys()
74 if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) in __bch_check_keys()
77 if (bch_ptr_invalid(b, k)) in __bch_check_keys()
81 if (p && bkey_cmp(p, &START_KEY(k)) > 0) in __bch_check_keys()
84 if (bch_ptr_bad(b, k)) in __bch_check_keys()
88 if (p && !bkey_cmp(p, k)) in __bch_check_keys()
91 p = k; in __bch_check_keys()
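
Note: __bch_check_keys() verifies the bset invariants pairwise: keys must be sorted by start (line 74), an extent may not reach past the next key's start (line 81), and exact duplicates are bugs (line 88). A toy validator over (start, end) extents, assuming half-open extents sorted by start:

    #include <stdbool.h>
    #include <stddef.h>

    struct toy_extent { unsigned long long start, end; /* [start, end) */ };

    /* Returns true iff the sorted-extent invariants hold. */
    static bool toy_check_extents(const struct toy_extent *v, size_t n)
    {
        for (size_t i = 1; i < n; i++) {
            if (v[i - 1].start > v[i].start)
                return false;           /* keys out of order */
            if (v[i - 1].end > v[i].start)
                return false;           /* extents overlap */
            if (v[i - 1].start == v[i].start && v[i - 1].end == v[i].end)
                return false;           /* duplicate key */
        }
        return true;
    }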
111 struct bkey *k = iter->data->k, *next = bkey_next(k); in bch_btree_iter_next_check() local
114 bkey_cmp(k, iter->b->ops->is_extents ? in bch_btree_iter_next_check()
158 struct bkey *k = l->keys; in bch_keylist_pop() local
160 if (k == l->top) in bch_keylist_pop()
163 while (bkey_next(k) != l->top) in bch_keylist_pop()
164 k = bkey_next(k); in bch_keylist_pop()
166 return l->top = k; in bch_keylist_pop()
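
Note: bch_keylist_pop() has to scan from the front: keys are variable-length and carry no back-pointers, so the only way to find the key preceding l->top is a linear walk (lines 163-164). The same shape with the toy types from the iteration sketch above:

    /* Pop the most recently added key: walk until the next key would be
     * `top`, then make that key the new top. NULL means the list was
     * empty. Reuses toy_bkey/toy_bkey_next from the sketch above. */
    static struct toy_bkey *toy_keylist_pop(struct toy_bkey *keys,
                                            struct toy_bkey **top)
    {
        struct toy_bkey *k = keys;

        if (k == *top)
            return NULL;

        while (toy_bkey_next(k) != *top)
            k = toy_bkey_next(k);

        return *top = k;
    }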
193 bool __bch_cut_front(const struct bkey *where, struct bkey *k) in __bch_cut_front() argument
197 if (bkey_cmp(where, &START_KEY(k)) <= 0) in __bch_cut_front()
200 if (bkey_cmp(where, k) < 0) in __bch_cut_front()
201 len = KEY_OFFSET(k) - KEY_OFFSET(where); in __bch_cut_front()
203 bkey_copy_key(k, where); in __bch_cut_front()
205 for (i = 0; i < KEY_PTRS(k); i++) in __bch_cut_front()
206 SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len); in __bch_cut_front()
208 BUG_ON(len > KEY_SIZE(k)); in __bch_cut_front()
209 SET_KEY_SIZE(k, len); in __bch_cut_front()
213 bool __bch_cut_back(const struct bkey *where, struct bkey *k) in __bch_cut_back() argument
217 if (bkey_cmp(where, k) >= 0) in __bch_cut_back()
220 BUG_ON(KEY_INODE(where) != KEY_INODE(k)); in __bch_cut_back()
222 if (bkey_cmp(where, &START_KEY(k)) > 0) in __bch_cut_back()
223 len = KEY_OFFSET(where) - KEY_START(k); in __bch_cut_back()
225 bkey_copy_key(k, where); in __bch_cut_back()
227 BUG_ON(len > KEY_SIZE(k)); in __bch_cut_back()
228 SET_KEY_SIZE(k, len); in __bch_cut_back()
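
Note: both helpers trim an extent in place. __bch_cut_front() discards the front, so every data pointer's offset must be advanced by the number of sectors dropped, that is, old size minus new size (line 206); __bch_cut_back() discards the tail and leaves pointer offsets alone. A toy version over a single extent with one data pointer; the field names are invented for the sketch:

    #include <stdbool.h>

    struct toy_ext {
        unsigned long long start, end;  /* extent covers [start, end) */
        unsigned long long dev_offset;  /* where its data starts on disk */
    };

    /* Drop everything before `where`; false means nothing was cut. */
    static bool toy_cut_front(unsigned long long where, struct toy_ext *e)
    {
        if (where <= e->start)
            return false;

        unsigned long long old_len = e->end - e->start;
        unsigned long long new_len = where < e->end ? e->end - where : 0;

        e->dev_offset += old_len - new_len;  /* data now starts later */
        e->start = e->end - new_len;
        return true;
    }

    /* Drop everything from `where` on; dev_offset is untouched. */
    static bool toy_cut_back(unsigned long long where, struct toy_ext *e)
    {
        if (where >= e->end)
            return false;

        e->end = where > e->start ? where : e->start;
        return true;
    }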
509 static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k) in bkey_to_cacheline() argument
511 return ((void *) k - (void *) t->data) / BSET_CACHELINE; in bkey_to_cacheline()
516 struct bkey *k) in bkey_to_cacheline_offset() argument
518 return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0); in bkey_to_cacheline_offset()
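
Note: bkey_to_cacheline() and bkey_to_cacheline_offset() are pure pointer arithmetic: the cacheline index is the key's byte offset from the start of the set divided by BSET_CACHELINE, and the offset within a cacheline is counted in u64s. The same arithmetic, with an assumed 128-byte BSET_CACHELINE:

    #include <stdint.h>
    #include <stddef.h>

    #define TOY_BSET_CACHELINE 128   /* assumed constant for the sketch */

    /* Which cacheline (counted from the set's base) contains k? */
    static unsigned toy_key_to_cacheline(const void *base, const void *k)
    {
        return (unsigned)(((const char *)k - (const char *)base)
                          / TOY_BSET_CACHELINE);
    }

    /* How many u64s into that cacheline does k start? */
    static unsigned toy_key_to_cacheline_offset(const void *base,
                                                unsigned cacheline,
                                                const void *k)
    {
        const uint64_t *line = (const uint64_t *)((const char *)base
                               + (size_t)cacheline * TOY_BSET_CACHELINE);
        return (unsigned)((const uint64_t *)k - line);
    }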
547 static inline unsigned bfloat_mantissa(const struct bkey *k, in bfloat_mantissa() argument
550 const uint64_t *p = &k->low - (f->exponent >> 6); in bfloat_mantissa()
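
Note: bfloat_mantissa() reads a small window of bits out of the 128-bit key: `f->exponent >> 6` picks which u64 of the key to start from (line 550) and the low six exponent bits give the shift within it. A toy extraction of `bits` bits starting at bit `exp` of a 128-bit value held as hi:lo; a mask width under 32 is assumed:

    #include <stdint.h>

    /* Return `bits` bits (bits < 32) of hi:lo starting at bit `exp`
     * from the least-significant end, exp in [0, 128). */
    static unsigned toy_mantissa(uint64_t hi, uint64_t lo,
                                 unsigned exp, unsigned bits)
    {
        uint64_t v;

        if (exp >= 64)
            v = hi >> (exp - 64);
        else if (exp == 0)
            v = lo;
        else
            v = (lo >> exp) | (hi << (64 - exp));

        return (unsigned)(v & (((uint64_t)1 << bits) - 1));
    }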
637 struct bkey *prev = NULL, *k = t->data->start; in bch_bset_build_written_tree() local
659 while (bkey_to_cacheline(t, k) < cacheline) in bch_bset_build_written_tree()
660 prev = k, k = bkey_next(k); in bch_bset_build_written_tree()
663 t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k); in bch_bset_build_written_tree()
666 while (bkey_next(k) != bset_bkey_last(t->data)) in bch_bset_build_written_tree()
667 k = bkey_next(k); in bch_bset_build_written_tree()
669 t->end = *k; in bch_bset_build_written_tree()
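
Note: bch_bset_build_written_tree() fills the auxiliary tree in a single pass over the keys. The tree is packed in heap order (node j's children are 2j and 2j+1), so visiting cachelines in key order means an inorder traversal of that array; for each visited node it advances k to the first key of the next cacheline (lines 659-660) and records it, then caches the set's last key in t->end. A userspace sketch of that traversal, with the kernel's ffz() rendered as a GCC/Clang builtin:

    /* First node of an inorder walk over a 1-indexed, heap-ordered
     * tree with `size` slots: the leftmost descendant of the root. */
    static unsigned toy_inorder_first(unsigned size)
    {
        unsigned j = 1;
        while (j * 2 < size)
            j *= 2;
        return j;
    }

    /* Inorder successor; returns 0 when the walk is done. */
    static unsigned toy_inorder_next(unsigned j, unsigned size)
    {
        if (j * 2 + 1 < size) {
            j = j * 2 + 1;           /* step to the right child, */
            while (j * 2 < size)
                j *= 2;              /* then all the way left */
        } else {
            /* climb while we are a right child: strip trailing 1 bits */
            j >>= __builtin_ctz(~j) + 1;
        }
        return j;
    }

Usage: for (j = toy_inorder_first(size); j; j = toy_inorder_next(j, size)) visits the nodes in key order.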
681 void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k) in bch_bset_fix_invalidated_key() argument
687 if (k < bset_bkey_last(t->data)) in bch_bset_fix_invalidated_key()
695 inorder = bkey_to_cacheline(t, k); in bch_bset_fix_invalidated_key()
697 if (k == t->data->start) in bch_bset_fix_invalidated_key()
700 if (bkey_next(k) == bset_bkey_last(t->data)) { in bch_bset_fix_invalidated_key()
701 t->end = *k; in bch_bset_fix_invalidated_key()
709 k == tree_to_bkey(t, j)) in bch_bset_fix_invalidated_key()
719 k == tree_to_prev_bkey(t, j)) in bch_bset_fix_invalidated_key()
729 struct bkey *k) in bch_bset_fix_lookup_table() argument
731 unsigned shift = bkey_u64s(k); in bch_bset_fix_lookup_table()
732 unsigned j = bkey_to_cacheline(t, k); in bch_bset_fix_lookup_table()
743 table_to_bkey(t, j) <= k) in bch_bset_fix_lookup_table()
753 k = table_to_bkey(t, j - 1); in bch_bset_fix_lookup_table()
755 while (k < cacheline_to_bkey(t, j, 0)) in bch_bset_fix_lookup_table()
756 k = bkey_next(k); in bch_bset_fix_lookup_table()
758 t->prev[j] = bkey_to_cacheline_offset(t, j, k); in bch_bset_fix_lookup_table()
767 for (k = table_to_bkey(t, t->size - 1); in bch_bset_fix_lookup_table()
768 k != bset_bkey_last(t->data); in bch_bset_fix_lookup_table()
769 k = bkey_next(k)) in bch_bset_fix_lookup_table()
770 if (t->size == bkey_to_cacheline(t, k)) { in bch_bset_fix_lookup_table()
771 t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k); in bch_bset_fix_lookup_table()
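
Note: bch_bset_fix_lookup_table() patches the flat lookup table after an insert: every entry past the new key shifts by bkey_u64s(k) (line 731), entries that slid across a cacheline boundary are re-walked forward (lines 755-756), and trailing entries are appended if the set grew into another cacheline (lines 767-771). The invariant it maintains is easiest to see rebuilt from scratch; a toy rebuild, reusing the toy helpers above, where table[j] ends up holding the u64 offset of the first key at or beyond cacheline j:

    /* Rebuild the cacheline -> first-key table from scratch. Entries
     * past the last key's cacheline are left untouched; the real code
     * tracks the valid size separately (t->size). */
    static void toy_build_table(struct toy_bkey *start, struct toy_bkey *end,
                                unsigned *table, unsigned nr_lines)
    {
        unsigned line = 0;

        for (struct toy_bkey *k = start; k < end; k = toy_bkey_next(k)) {
            unsigned c = toy_key_to_cacheline(start, k);
            while (line <= c && line < nr_lines)
                table[line++] = (unsigned)((uint64_t *)k - (uint64_t *)start);
        }
    }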
819 unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k, in bch_btree_insert_key() argument
827 BUG_ON(b->ops->is_extents && !KEY_SIZE(k)); in bch_btree_insert_key()
830 ? PRECEDING_KEY(&START_KEY(k)) in bch_btree_insert_key()
831 : PRECEDING_KEY(k)); in bch_btree_insert_key()
833 if (b->ops->insert_fixup(b, k, &iter, replace_key)) in bch_btree_insert_key()
839 bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) in bch_btree_insert_key()
845 bch_bkey_try_merge(b, prev, k)) in bch_btree_insert_key()
850 KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m)) in bch_btree_insert_key()
855 bch_bkey_try_merge(b, k, m)) in bch_btree_insert_key()
858 bch_bset_insert(b, m, k); in bch_btree_insert_key()
859 copy: bkey_copy(m, k); in bch_btree_insert_key()
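
Note: bch_btree_insert_key() seeds the search with PRECEDING_KEY so extent inserts can see a key whose start they overlap (lines 830-831), lets insert_fixup resolve overlaps, then tries to merge with the neighbour on either side before falling back to a shift-and-copy insert (lines 858-859). That fallback amounts to opening a gap with memmove; a toy version over a flat u64 array:

    #include <stdint.h>
    #include <string.h>

    /* Open a key_u64s-sized gap at pos_u64 and copy the key in.
     * The caller guarantees the array has room for the new key. */
    static void toy_bset_insert(uint64_t *set, size_t *nr_u64s,
                                size_t pos_u64,
                                const uint64_t *key, size_t key_u64s)
    {
        memmove(set + pos_u64 + key_u64s, set + pos_u64,
                (*nr_u64s - pos_u64) * sizeof(uint64_t));
        memcpy(set + pos_u64, key, key_u64s * sizeof(uint64_t));
        *nr_u64s += key_u64s;
    }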
1024 return bkey_cmp(l.k, r.k) > 0; in btree_iter_cmp()
1032 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, in bch_btree_iter_push() argument
1035 if (k != end) in bch_btree_iter_push()
1037 ((struct btree_iter_set) { k, end }), in bch_btree_iter_push()
1079 ret = iter->data->k; in __bch_btree_iter_next()
1080 iter->data->k = bkey_next(iter->data->k); in __bch_btree_iter_next()
1082 if (iter->data->k > iter->data->end) { in __bch_btree_iter_next()
1084 iter->data->k = iter->data->end; in __bch_btree_iter_next()
1087 if (iter->data->k == iter->data->end) in __bch_btree_iter_next()
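
Note: the btree iterator is a small heap of {k, end} ranges ordered by btree_iter_cmp (line 1024); __bch_btree_iter_next() returns the top range's current key, advances it, and drops the range once exhausted, clamping at data->end so a corrupt key cannot run past its bset (lines 1082-1084). The merge step reduced to two sorted ranges, without the heap:

    #include <stdint.h>
    #include <stddef.h>

    struct toy_range { const uint64_t *k, *end; };

    /* Return the smallest unconsumed element across both ranges and
     * advance its range; NULL once both are exhausted. A stand-in for
     * the heap the real iterator keeps over every bset in a node. */
    static const uint64_t *toy_iter_next(struct toy_range *a,
                                         struct toy_range *b)
    {
        struct toy_range *min;

        if (a->k == a->end && b->k == b->end)
            return NULL;
        if (a->k == a->end)
            min = b;
        else if (b->k == b->end)
            min = a;
        else
            min = (*a->k <= *b->k) ? a : b;

        return min->k++;
    }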
1143 struct bkey *k, *last = NULL; in btree_mergesort() local
1144 BKEY_PADDED(k) tmp; in btree_mergesort()
1155 k = b->ops->sort_fixup(iter, &tmp.k); in btree_mergesort()
1157 k = NULL; in btree_mergesort()
1159 if (!k) in btree_mergesort()
1160 k = __bch_btree_iter_next(iter, b->ops->sort_cmp); in btree_mergesort()
1162 if (bad(b, k)) in btree_mergesort()
1167 bkey_copy(last, k); in btree_mergesort()
1168 } else if (!bch_bkey_try_merge(b, last, k)) { in btree_mergesort()
1170 bkey_copy(last, k); in btree_mergesort()
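
Note: btree_mergesort() drains the iterator in order, skipping keys that fail bad() (line 1162) and coalescing each new key into `last` when bch_bkey_try_merge() allows it, copying it out otherwise (lines 1167-1170). The same loop shape over the two-range iterator sketched above, with a sentinel value standing in for bad keys and equality standing in for mergeability:

    #include <stdint.h>
    #include <stddef.h>

    /* Merge two sorted runs into out[], dropping "bad" keys (here:
     * UINT64_MAX) and collapsing "mergeable" neighbours (here: equal
     * values). Returns the number of keys written. */
    static size_t toy_mergesort(struct toy_range *a, struct toy_range *b,
                                uint64_t *out)
    {
        size_t n = 0;
        const uint64_t *k;

        while ((k = toy_iter_next(a, b)) != NULL) {
            if (*k == UINT64_MAX)
                continue;               /* bad(): drop the key */
            if (n && out[n - 1] == *k)
                continue;               /* try_merge(): absorbed */
            out[n++] = *k;
        }
        return n;
    }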