/lib/ |
D | list_debug.c |
  21  struct list_head *next) in __list_add_valid() argument
  25  CHECK_DATA_CORRUPTION(next == NULL, in __list_add_valid()
  27  CHECK_DATA_CORRUPTION(next->prev != prev, in __list_add_valid()
  29  prev, next->prev, next) || in __list_add_valid()
  30  CHECK_DATA_CORRUPTION(prev->next != next, in __list_add_valid()
  32  next, prev->next, prev) || in __list_add_valid()
  33  CHECK_DATA_CORRUPTION(new == prev || new == next, in __list_add_valid()
  35  new, prev, next)) in __list_add_valid()
  44  struct list_head *prev, *next; in __list_del_entry_valid() local
  47  next = entry->next; in __list_del_entry_valid()
  [all …]
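
The hits above are the corruption checks run before a node is linked into a doubly linked list. A minimal standalone sketch of the same checks (plain C; the names are illustrative, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    struct node { struct node *prev, *next; };

    /* Refuse to link 'new' between prev and next if the neighbours no
     * longer point at each other, or if 'new' is already one of them
     * (a double add). Same spirit as __list_add_valid(); names are
     * illustrative, not the kernel API. */
    static bool add_valid(struct node *new, struct node *prev, struct node *next)
    {
        if (!prev || !next)
            return false;
        if (next->prev != prev || prev->next != next)
            return false;                /* neighbours are inconsistent */
        if (new == prev || new == next)
            return false;                /* double add */
        return true;
    }

    static void list_insert(struct node *new, struct node *prev, struct node *next)
    {
        if (!add_valid(new, prev, next)) {
            fprintf(stderr, "list corruption detected, insert skipped\n");
            return;
        }
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
    }

    int main(void)
    {
        struct node head = { &head, &head };     /* empty circular list */
        struct node a;

        list_insert(&a, &head, head.next);       /* fine: links a after head */
        list_insert(&a, &head, head.next);       /* rejected: double add */
        return 0;
    }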
|
D | list_sort.c |
  28  tail = &a->next; in merge()
  29  a = a->next; in merge()
  36  tail = &b->next; in merge()
  37  b = b->next; in merge()
  64  tail->next = a; in merge_final()
  67  a = a->next; in merge_final()
  71  tail->next = b; in merge_final()
  74  b = b->next; in merge_final()
  83  tail->next = b; in merge_final()
  96  b = b->next; in merge_final()
  [all …]
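
The merge() hits show two sorted singly linked lists being spliced together through a pointer to the tail node's next field. A self-contained sketch of that pattern, with an illustrative node type:

    #include <stdio.h>

    struct snode { int val; struct snode *next; };

    /* Stable merge of two already-sorted singly linked lists. 'tail'
     * points at the next-field to fill, so no dummy head is needed --
     * the same splice-through-a-tail-pointer shape as merge() above.
     * A sketch, not the kernel implementation. */
    static struct snode *merge(struct snode *a, struct snode *b)
    {
        struct snode *head = NULL, **tail = &head;

        while (a && b) {
            if (a->val <= b->val) {
                *tail = a;
                tail = &a->next;
                a = a->next;
            } else {
                *tail = b;
                tail = &b->next;
                b = b->next;
            }
        }
        *tail = a ? a : b;               /* splice the leftover list */
        return head;
    }

    int main(void)
    {
        struct snode a1 = {5, NULL}, a0 = {1, &a1};   /* list A: 1 -> 5 */
        struct snode b1 = {6, NULL}, b0 = {2, &b1};   /* list B: 2 -> 6 */

        for (struct snode *p = merge(&a0, &b0); p; p = p->next)
            printf("%d ", p->val);                    /* prints: 1 2 5 6 */
        printf("\n");
        return 0;
    }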
|
D | plist.c |
  35  WARN(n->prev != p || p->next != n, in plist_check_prev_next()
  39  t, t->next, t->prev, in plist_check_prev_next()
  40  p, p->next, p->prev, in plist_check_prev_next()
  41  n, n->next, n->prev); in plist_check_prev_next()
  46  struct list_head *prev = top, *next = top->next; in plist_check_list() local
  48  plist_check_prev_next(top, prev, next); in plist_check_list()
  49  while (next != top) { in plist_check_list()
  50  prev = next; in plist_check_list()
  51  next = prev->next; in plist_check_list()
  52  plist_check_prev_next(top, prev, next); in plist_check_list()
  [all …]
|
D | cpumask.c |
  79  int next; in cpumask_next_wrap() local
  82  next = cpumask_next(n, mask); in cpumask_next_wrap()
  84  if (wrap && n < start && next >= start) { in cpumask_next_wrap()
  87  } else if (next >= nr_cpumask_bits) { in cpumask_next_wrap()
  93  return next; in cpumask_next_wrap()
  249  int next, prev; in cpumask_any_and_distribute() local
  254  next = cpumask_next_and(prev, src1p, src2p); in cpumask_any_and_distribute()
  255  if (next >= nr_cpu_ids) in cpumask_any_and_distribute()
  256  next = cpumask_first_and(src1p, src2p); in cpumask_any_and_distribute()
  258  if (next < nr_cpu_ids) in cpumask_any_and_distribute()
  [all …]
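
cpumask_next_wrap() and cpumask_any_and_distribute() look for the next set bit and fall back to the first one when the end of the mask is reached. A sketch of the same round-robin scan over a plain 64-bit mask (the helper names here are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define NBITS 64

    /* Next set bit strictly after 'n' in a 64-bit mask, or NBITS if none. */
    static int next_set(uint64_t mask, int n)
    {
        for (int i = n + 1; i < NBITS; i++)
            if (mask & (1ULL << i))
                return i;
        return NBITS;
    }

    /* Round-robin pick: the set bit after 'prev', wrapping back to the
     * lowest set bit when the end of the mask is reached. Same shape as
     * the cpumask_next_and()/cpumask_first_and() pairing above. */
    static int next_wrap(uint64_t mask, int prev)
    {
        int next = next_set(mask, prev);

        if (next >= NBITS)               /* ran off the end: wrap around */
            next = next_set(mask, -1);
        return next;                     /* NBITS if the mask is empty */
    }

    int main(void)
    {
        uint64_t mask = (1ULL << 1) | (1ULL << 5) | (1ULL << 9);
        int cpu = -1;

        for (int i = 0; i < 5; i++) {
            cpu = next_wrap(mask, cpu);
            printf("pick %d\n", cpu);    /* 1 5 9 1 5 */
        }
        return 0;
    }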
|
D | llist.c |
  32  new_last->next = first = READ_ONCE(head->first); in llist_add_batch()
  55  struct llist_node *entry, *old_entry, *next; in llist_del_first() local
  62  next = READ_ONCE(entry->next); in llist_del_first()
  63  entry = cmpxchg(&head->first, old_entry, next); in llist_del_first()
  85  head = head->next; in llist_reverse_order()
  86  tmp->next = new_head; in llist_reverse_order()
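
llist_reverse_order() reverses a singly linked list by repeatedly pushing the old head onto a new list. A plain, non-atomic sketch of that walk:

    #include <stdio.h>

    struct lnode { int val; struct lnode *next; };

    /* Reverse a singly linked list in place: advance the old head first,
     * then push the detached node onto the new list -- the same walk
     * llist_reverse_order() does, minus the lock-free context. */
    static struct lnode *reverse(struct lnode *head)
    {
        struct lnode *new_head = NULL;

        while (head) {
            struct lnode *tmp = head;

            head = head->next;           /* advance before relinking */
            tmp->next = new_head;
            new_head = tmp;
        }
        return new_head;
    }

    int main(void)
    {
        struct lnode c = {3, NULL}, b = {2, &c}, a = {1, &b};

        for (struct lnode *p = reverse(&a); p; p = p->next)
            printf("%d ", p->val);       /* prints: 3 2 1 */
        printf("\n");
        return 0;
    }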
|
D | bootconfig.c |
  105  return node->next ? &xbc_nodes[node->next] : NULL; in xbc_node_get_next()
  277  struct xbc_node *next; in xbc_node_find_next_leaf() local
  288  next = xbc_node_get_subkey(node); in xbc_node_find_next_leaf()
  289  if (next) { in xbc_node_find_next_leaf()
  290  node = next; in xbc_node_find_next_leaf()
  297  while (!node->next) { in xbc_node_find_next_leaf()
  353  node->next = 0; in xbc_init_node()
  374  while (node->next) in xbc_last_sibling()
  397  sib->next = xbc_node_index(node); in __xbc_add_sibling()
  401  node->next = last_parent->child; in __xbc_add_sibling()
  [all …]
|
D | kobject_uevent.c |
  102  const char *next = buf; in action_arg_word_end() local
  104  while (next <= buf_end && *next != delim) in action_arg_word_end()
  105  if (!isalnum(*next++)) in action_arg_word_end()
  108  if (next == buf) in action_arg_word_end()
  111  return next; in action_arg_word_end()
  118  const char *next, *buf_end, *key; in kobject_action_args() local
  141  next = buf + UUID_STRING_LEN; in kobject_action_args()
  144  while (next <= buf_end) { in kobject_action_args()
  145  if (*next != ' ') in kobject_action_args()
  149  key = ++next; in kobject_action_args()
  [all …]
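
action_arg_word_end() advances next across a run of alphanumeric characters up to a delimiter and rejects anything else. A simplified sketch of that validation loop:

    #include <ctype.h>
    #include <stdio.h>

    /* Return a pointer just past a run of alphanumeric characters that
     * starts at 'buf' and ends at 'delim' or at 'buf_end' (inclusive);
     * NULL if the word is empty or contains anything else. A sketch of
     * the loop in action_arg_word_end(); buffer handling is simplified. */
    static const char *word_end(const char *buf, const char *buf_end, char delim)
    {
        const char *next = buf;

        while (next <= buf_end && *next != delim) {
            if (!isalnum((unsigned char)*next))
                return NULL;
            next++;
        }
        if (next == buf)
            return NULL;                 /* empty word */
        return next;
    }

    int main(void)
    {
        const char *s = "add4 more";
        const char *end = word_end(s, s + 8, ' ');

        if (end)
            printf("word length: %td\n", end - s);   /* prints: 4 */
        return 0;
    }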
|
D | klist.c |
  379  struct klist_node *next; in klist_next() local
  385  next = to_klist_node(last->n_node.next); in klist_next()
  389  next = to_klist_node(i->i_klist->k_list.next); in klist_next()
  392  while (next != to_klist_node(&i->i_klist->k_list)) { in klist_next()
  393  if (likely(!knode_dead(next))) { in klist_next()
  394  kref_get(&next->n_ref); in klist_next()
  395  i->i_cur = next; in klist_next()
  398  next = to_klist_node(next->n_node.next); in klist_next()
|
D | test_list_sort.c |
  100  for (cur = head.next; cur->next != &head; cur = cur->next) { in list_sort_test()
  104  if (cur->next->prev != cur) { in list_sort_test()
  109  cmp_result = cmp(NULL, cur, cur->next); in list_sort_test()
  116  el1 = container_of(cur->next, struct debug_el, list); in list_sort_test()
|
D | timerqueue.c |
  85  struct rb_node *next; in timerqueue_iterate_next() local
  89  next = rb_next(&node->node); in timerqueue_iterate_next()
  90  if (!next) in timerqueue_iterate_next()
  92  return container_of(next, struct timerqueue_node, node); in timerqueue_iterate_next()
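
timerqueue_iterate_next() is essentially rb_next() plus container_of(). The successor walk itself looks like the sketch below for any binary search tree with parent pointers (generic BST code, not the kernel rbtree implementation):

    #include <stddef.h>

    struct tnode {
        long key;
        struct tnode *left, *right, *parent;
    };

    /* In-order successor: either the leftmost node of the right subtree,
     * or the first ancestor of which we are in the left subtree. This is
     * the walk rb_next() performs on an rbtree. */
    static struct tnode *tree_next(struct tnode *node)
    {
        struct tnode *parent;

        if (node->right) {
            node = node->right;
            while (node->left)
                node = node->left;
            return node;
        }
        while ((parent = node->parent) && node == parent->right)
            node = parent;
        return parent;                   /* NULL after the largest key */
    }

    int main(void)
    {
        struct tnode root = {2, NULL, NULL, NULL};
        struct tnode l = {1, NULL, NULL, &root};
        struct tnode r = {3, NULL, NULL, &root};

        root.left = &l;
        root.right = &r;
        /* in-order: 1, 2, 3, then NULL */
        return (tree_next(&l) == &root && tree_next(&root) == &r &&
                tree_next(&r) == NULL) ? 0 : 1;
    }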
|
D | rhashtable.c |
  231  struct rhash_head *head, *next, *entry; in rhashtable_rehash_one() local
  243  next = rht_dereference_bucket(entry->next, old_tbl, old_hash); in rhashtable_rehash_one()
  245  if (rht_is_a_nulls(next)) in rhashtable_rehash_one()
  248  pprev = &entry->next; in rhashtable_rehash_one()
  260  RCU_INIT_POINTER(entry->next, head); in rhashtable_rehash_one()
  265  rcu_assign_pointer(*pprev, next); in rhashtable_rehash_one()
  268  rht_assign_locked(bkt, next); in rhashtable_rehash_one()
  510  pprev = &head->next; in rhashtable_lookup_one()
  520  RCU_INIT_POINTER(list->next, plist); in rhashtable_lookup_one()
  521  head = rht_dereference_bucket(head->next, tbl, hash); in rhashtable_lookup_one()
  [all …]
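
rhashtable_rehash_one() walks a bucket chain while keeping pprev, a pointer to the previous node's next field, so an entry can be unlinked without special-casing the chain head. A non-RCU sketch of that unlink pattern:

    #include <stdio.h>

    struct hnode { int key; struct hnode *next; };

    /* Unlink the first node with a matching key from a singly linked
     * chain. Carrying 'pprev' removes the need to special-case the head --
     * the same idea as the pprev walk in rhashtable_rehash_one(). */
    static struct hnode *unlink_key(struct hnode **head, int key)
    {
        struct hnode **pprev = head;

        for (struct hnode *entry = *head; entry; entry = entry->next) {
            if (entry->key == key) {
                *pprev = entry->next;    /* splice the entry out */
                entry->next = NULL;
                return entry;
            }
            pprev = &entry->next;
        }
        return NULL;
    }

    int main(void)
    {
        struct hnode c = {3, NULL}, b = {2, &c}, a = {1, &b};
        struct hnode *head = &a;

        unlink_key(&head, 2);
        for (struct hnode *p = head; p; p = p->next)
            printf("%d ", p->key);       /* prints: 1 3 */
        printf("\n");
        return 0;
    }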
|
D | list-test.c |
  55  KUNIT_EXPECT_PTR_EQ(test, list.next, &b); in list_test_list_add()
  57  KUNIT_EXPECT_PTR_EQ(test, b.next, &a); in list_test_list_add()
  69  KUNIT_EXPECT_PTR_EQ(test, list.next, &a); in list_test_list_add_tail()
  71  KUNIT_EXPECT_PTR_EQ(test, a.next, &b); in list_test_list_add_tail()
  86  KUNIT_EXPECT_PTR_EQ(test, list.next, &b); in list_test_list_del()
  102  KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new); in list_test_list_replace()
  118  KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new); in list_test_list_replace_init()
  137  KUNIT_EXPECT_PTR_EQ(test, &b, list.next); in list_test_list_swap()
  140  KUNIT_EXPECT_PTR_EQ(test, &a, b.next); in list_test_list_swap()
  143  KUNIT_EXPECT_PTR_EQ(test, &list, a.next); in list_test_list_swap()
  [all …]
|
D | cmdline.c |
  205  char *next; in next_arg() local
  243  next = args + i + 1; in next_arg()
  245  next = args + i; in next_arg()
  248  return skip_spaces(next); in next_arg()
|
D | oid_registry.c |
  79  goto next; in look_up_OID()
  83  goto next; in look_up_OID()
  87  next: in look_up_OID()
|
D | test_xarray.c |
  209  unsigned long next = base + (1UL << order); in check_xa_mark_1() local
  216  XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); in check_xa_mark_1()
  219  for (i = base; i < next; i++) { in check_xa_mark_1()
  244  XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); in check_xa_mark_1()
  245  XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); in check_xa_mark_1()
  246  XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); in check_xa_mark_1()
  248  xa_erase_index(xa, next); in check_xa_mark_1()
  806  u32 next = 0; in check_xa_alloc_3() local
  812  &next, GFP_KERNEL) != 0); in check_xa_alloc_3()
  815  next = 0x3ffd; in check_xa_alloc_3()
  [all …]
|
D | ts_fsm.c |
  137  struct ts_fsm_token *cur = NULL, *next; in fsm_find() local
  169  next = &fsm->tokens[tok_idx + 1]; in fsm_find()
  171  next = NULL; in fsm_find()
  199  if (next == NULL) in fsm_find()
  205  while (!match_token(next, data[block_idx])) { in fsm_find()
  222  while (!match_token(next, data[block_idx])) { in fsm_find()
|
D | stackdepot.c |
  62  struct stack_record *next; /* Link in the hashtable */ member
  208  for (found = bucket; found; found = found->next) { in find_stack()
  316  new->next = *bucket; in stack_depot_save()
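
stackdepot chains stack_record entries through next inside hash buckets: find_stack() walks the chain and stack_depot_save() pushes a new record at the head. A simplified find-or-insert sketch with an illustrative record type and hash function (no locking, plain calloc):

    #include <stdlib.h>
    #include <string.h>

    #define NBUCKETS 64

    struct record {
        struct record *next;             /* link in the hash chain */
        unsigned int hash;
        char name[32];
    };

    static struct record *table[NBUCKETS];

    static unsigned int hash_str(const char *s)
    {
        unsigned int h = 2166136261u;    /* FNV-1a, an illustrative choice */

        while (*s)
            h = (h ^ (unsigned char)*s++) * 16777619u;
        return h;
    }

    /* Walk the bucket chain looking for the name; if it is not there,
     * push a new record onto the chain head. The same find-or-insert
     * shape as find_stack()/stack_depot_save(), minus the locking and
     * the depot's custom allocator. */
    static struct record *get_record(const char *name)
    {
        unsigned int hash = hash_str(name);
        struct record **bucket = &table[hash % NBUCKETS];
        struct record *found;

        for (found = *bucket; found; found = found->next)
            if (found->hash == hash && strcmp(found->name, name) == 0)
                return found;

        found = calloc(1, sizeof(*found));
        if (!found)
            return NULL;
        found->hash = hash;
        strncpy(found->name, name, sizeof(found->name) - 1);
        found->next = *bucket;           /* insert at the chain head */
        *bucket = found;
        return found;
    }

    int main(void)
    {
        struct record *a = get_record("alpha");
        struct record *b = get_record("alpha");   /* found, not re-added */

        return (a && a == b) ? 0 : 1;
    }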
|
D | test_hmm.c |
  675  unsigned long next; in dmirror_migrate() local
  688  for (addr = start; addr < end; addr = next) { in dmirror_migrate()
  695  next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT)); in dmirror_migrate()
  696  if (next > vma->vm_end) in dmirror_migrate()
  697  next = vma->vm_end; in dmirror_migrate()
  703  args.end = next; in dmirror_migrate()
  866  unsigned long next; in dmirror_snapshot() local
  890  for (addr = start; addr < end; addr = next) { in dmirror_snapshot()
  893  next = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end); in dmirror_snapshot()
  895  range.end = next; in dmirror_snapshot()
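
dmirror_migrate() and dmirror_snapshot() step through an address range one bounded chunk at a time, clamping next so the last step does not overrun the end. A standalone sketch of that stepping (the chunk size and range are illustrative):

    #include <stdio.h>

    #define CHUNK 4096UL

    /* Walk [start, end) in CHUNK-sized steps, clamping the final step --
     * the addr/next stepping dmirror_migrate() uses to keep each pass
     * within a fixed-size PFN array. */
    static void for_each_chunk(unsigned long start, unsigned long end)
    {
        unsigned long addr, next;

        for (addr = start; addr < end; addr = next) {
            next = addr + CHUNK;
            if (next > end)
                next = end;              /* clamp the final chunk */
            printf("process [%#lx, %#lx)\n", addr, next);
        }
    }

    int main(void)
    {
        for_each_chunk(0x10000, 0x12800);    /* two full chunks + a partial */
        return 0;
    }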
|
D | xarray.c |
  269  struct xa_node *next, *node = xas->xa_alloc; in xas_destroy() local
  273  next = rcu_dereference_raw(node->parent); in xas_destroy()
  275  xas->xa_alloc = node = next; in xas_destroy()
  781  void *first, *next; in xas_store() local
  799  next = first; in xas_store()
  819  if (xa_is_node(next) && (!node || node->shift)) in xas_store()
  820  xas_free_nodes(xas, xa_to_node(next)); in xas_store()
  823  count += !next - !entry; in xas_store()
  834  next = xa_entry_locked(xas->xa, node, ++offset); in xas_store()
  835  if (!xa_is_sibling(next)) { in xas_store()
  [all …]
|
/lib/lzo/ |
D | lzo1x_decompress_safe.c |
  44  size_t t, next; in lzo1x_decompress_safe() local
  68  next = t; in lzo1x_decompress_safe()
  121  next = t & 3; in lzo1x_decompress_safe()
  132  next = t & 3; in lzo1x_decompress_safe()
  139  next = t & 3; in lzo1x_decompress_safe()
  163  next = get_unaligned_le16(ip); in lzo1x_decompress_safe()
  165  m_pos -= next >> 2; in lzo1x_decompress_safe()
  166  next &= 3; in lzo1x_decompress_safe()
  169  next = get_unaligned_le16(ip); in lzo1x_decompress_safe()
  170  if (((next & 0xfffc) == 0xfffc) && in lzo1x_decompress_safe()
  [all …]
|
/lib/zlib_inflate/ |
D | inftrees.c |
  40  code *next; /* next available space in table */ in zlib_inflate_table() local
  190  next = *table; /* current table to fill in */ in zlib_inflate_table()
  224  next[(huff >> drop) + fill] = this; in zlib_inflate_table()
  252  next += min; /* here min is 1 << curr */ in zlib_inflate_table()
  273  (*table)[low].val = (unsigned short)(next - *table); in zlib_inflate_table()
  292  next = *table; in zlib_inflate_table()
  297  next[huff >> drop] = this; in zlib_inflate_table()
|
D | inflate.c |
  48  state->lencode = state->distcode = state->next = state->codes; in zlib_inflateReset()
  189  next = strm->next_in; \
  200  strm->next_in = next; \
  219  hold += (unsigned long)(*next++) << bits; \
  334  const unsigned char *next; /* next input */ in zlib_inflate() local
  456  memcpy(put, next, copy); in zlib_inflate()
  458  next += copy; in zlib_inflate()
  492  state->next = state->codes; in zlib_inflate()
  493  state->lencode = (code const *)(state->next); in zlib_inflate()
  495  ret = zlib_inflate_table(CODES, state->lens, 19, &(state->next), in zlib_inflate()
  [all …]
|
/lib/zlib_deflate/ |
D | deflate.c |
  200  char *next; in zlib_deflateInit2() local
  229  next = (char *) mem; in zlib_deflateInit2()
  230  next += sizeof(*mem); in zlib_deflateInit2()
  236  mem->window_memory = (Byte *) PTR_ALIGN(next, PAGE_SIZE); in zlib_deflateInit2()
  238  mem->window_memory = (Byte *) next; in zlib_deflateInit2()
  240  next += zlib_deflate_window_memsize(windowBits); in zlib_deflateInit2()
  241  mem->prev_memory = (Pos *) next; in zlib_deflateInit2()
  242  next += zlib_deflate_prev_memsize(windowBits); in zlib_deflateInit2()
  243  mem->head_memory = (Pos *) next; in zlib_deflateInit2()
  244  next += zlib_deflate_head_memsize(memLevel); in zlib_deflateInit2()
  [all …]
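
zlib_deflateInit2() carves a single allocation into its window, prev and head regions by bumping a next cursor past each one. A sketch of the same layout trick, assuming the caller picks sizes that keep each region aligned for its element type (the struct and sizes here are made up):

    #include <stdlib.h>

    struct workspace {
        unsigned char *window;
        unsigned short *prev;
        unsigned short *head;
        void *mem;                        /* single backing allocation */
    };

    /* Carve one allocation into three regions with a bump cursor, the
     * way zlib_deflateInit2() lays out its working memory. Sizes must
     * keep each region suitably aligned; this sketch does not check. */
    static int workspace_init(struct workspace *ws, size_t window_sz,
                              size_t prev_sz, size_t head_sz)
    {
        char *next = malloc(window_sz + prev_sz + head_sz);

        if (!next)
            return -1;
        ws->mem = next;

        ws->window = (unsigned char *)next;
        next += window_sz;
        ws->prev = (unsigned short *)next;
        next += prev_sz;
        ws->head = (unsigned short *)next;
        return 0;
    }

    int main(void)
    {
        struct workspace ws;

        if (workspace_init(&ws, 1UL << 15, 1UL << 15, 1UL << 13))
            return 1;
        free(ws.mem);
        return 0;
    }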
|
/lib/mpi/ |
D | mpih-mul.c |
  399  if (!ctx->next) { in mpihelp_mul_karatsuba_case()
  400  ctx->next = kzalloc(sizeof *ctx, GFP_KERNEL); in mpihelp_mul_karatsuba_case()
  401  if (!ctx->next) in mpihelp_mul_karatsuba_case()
  407  ctx->next) < 0) in mpihelp_mul_karatsuba_case()
  426  for (ctx = ctx->next; ctx; ctx = ctx2) { in mpihelp_release_karatsuba_ctx()
  427  ctx2 = ctx->next; in mpihelp_release_karatsuba_ctx()
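
mpihelp_release_karatsuba_ctx() tears down a lazily-grown chain of contexts, saving each node's successor before freeing it. A sketch of that two-pointer teardown with an illustrative context type:

    #include <stdlib.h>

    struct karat_ctx {
        struct karat_ctx *next;
        void *scratch;
    };

    /* Free a chain of contexts, grabbing each node's successor before
     * the node itself is freed -- the ctx/ctx2 two-pointer walk of
     * mpihelp_release_karatsuba_ctx(). Type and fields are illustrative. */
    static void release_chain(struct karat_ctx *head)
    {
        struct karat_ctx *ctx, *ctx2;

        for (ctx = head; ctx; ctx = ctx2) {
            ctx2 = ctx->next;            /* grab the link before freeing */
            free(ctx->scratch);
            free(ctx);
        }
    }

    int main(void)
    {
        struct karat_ctx *head = calloc(1, sizeof(*head));

        if (head)
            head->next = calloc(1, sizeof(*head));   /* grown lazily */
        release_chain(head);
        return 0;
    }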
|
/lib/842/ |
D | 842_compress.c |
  482  u64 last, next, pad, total; in sw842_compress() local
  517  next = get_unaligned((u64 *)p->in); in sw842_compress()
  528  if (next == last) { in sw842_compress()
  536  if (next == last) /* reached max repeat bits */ in sw842_compress()
  540  if (next == 0) in sw842_compress()
  549  last = next; in sw842_compress()
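
sw842_compress() compares each freshly loaded 8-byte word against the previous one (last) so runs of identical words can be handled with a compact repeat encoding. A sketch of just the repeat-counting part, with memcpy standing in for the kernel's unaligned get_unaligned() load:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Count how many consecutive 8-byte words at 'in' repeat the word
     * that came before them ('last') -- the next/last comparison used
     * above before emitting a repeat op. */
    static size_t count_repeats(const uint8_t *in, size_t len, uint64_t last)
    {
        size_t reps = 0;

        while (len >= 8) {
            uint64_t next;

            memcpy(&next, in, sizeof(next));   /* unaligned-safe load */
            if (next != last)
                break;
            reps++;
            in += 8;
            len -= 8;
        }
        return reps;
    }

    int main(void)
    {
        uint8_t buf[24];

        memset(buf, 0xab, sizeof(buf));
        /* three words identical to the preceding 0xabab... word */
        printf("%zu\n", count_repeats(buf, sizeof(buf), 0xababababababababULL));
        return 0;
    }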
|