/lib/
test_xarray.c
     69  unsigned order, void *entry, gfp_t gfp)    in xa_store_order() argument
     71  XA_STATE_ORDER(xas, xa, index, order);    in xa_store_order()
    174  unsigned int order;    in check_xa_mark_1() local
    204  for (order = 2; order < max_order; order++) {    in check_xa_mark_1()
    205  unsigned long base = round_down(index, 1UL << order);    in check_xa_mark_1()
    206  unsigned long next = base + (1UL << order);    in check_xa_mark_1()
    214  xa_store_order(xa, index, order, xa_mk_index(index),    in check_xa_mark_1()
    303  unsigned int order;    in check_xa_shrink() local
    328  for (order = 0; order < max_order; order++) {    in check_xa_shrink()
    329  unsigned long max = (1UL << order) - 1;    in check_xa_shrink()
    [all …]
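The xa_store_order() helper exercised here wraps a multi-order store: one entry is made to cover 2^order consecutive, naturally aligned indices, which is why check_xa_mark_1() rounds the index down to base = round_down(index, 1UL << order). A minimal sketch of the same pattern, assuming CONFIG_XARRAY_MULTI and using only the public xas API (the name store_order is illustrative, not the test's exact code):

	#include <linux/xarray.h>

	/* Store one entry over the 2^order aligned indices containing 'index',
	 * retrying while xas_nomem() is able to allocate more memory. */
	static void store_order(struct xarray *xa, unsigned long index,
				unsigned int order, void *entry, gfp_t gfp)
	{
		XA_STATE_ORDER(xas, xa, index, order);

		do {
			xas_lock(&xas);
			xas_store(&xas, entry);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, gfp));
	}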
genalloc.c
    244  int order = pool->min_alloc_order;    in gen_pool_destroy() local
    251  end_bit = chunk_size(chunk) >> order;    in gen_pool_destroy()
    280  int order = pool->min_alloc_order;    in gen_pool_alloc_algo_owner() local
    293  nbits = (size + (1UL << order) - 1) >> order;    in gen_pool_alloc_algo_owner()
    300  end_bit = chunk_size(chunk) >> order;    in gen_pool_alloc_algo_owner()
    314  addr = chunk->start_addr + ((unsigned long)start_bit << order);    in gen_pool_alloc_algo_owner()
    315  size = nbits << order;    in gen_pool_alloc_algo_owner()
    489  int order = pool->min_alloc_order;    in gen_pool_free_owner() local
    499  nbits = (size + (1UL << order) - 1) >> order;    in gen_pool_free_owner()
    504  start_bit = (addr - chunk->start_addr) >> order;    in gen_pool_free_owner()
    [all …]
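These hits show how min_alloc_order sets the pool's granularity: every size is converted to bitmap bits as nbits = (size + (1UL << order) - 1) >> order, so requests are rounded up to whole 2^order-byte granules. A hedged usage sketch of the gen_pool API (the region address 0x40000000 and the 64 KiB size are made-up example values, not taken from the code above):

	#include <linux/genalloc.h>

	/* Build an order-8 pool (256-byte granules) over an example region.
	 * nid -1 means no NUMA node preference. */
	static unsigned long genpool_demo(void)
	{
		struct gen_pool *pool = gen_pool_create(8, -1);
		unsigned long va = 0;

		if (!pool)
			return 0;
		if (gen_pool_add(pool, 0x40000000UL, 0x10000, -1))
			goto out;

		va = gen_pool_alloc(pool, 1000);  /* rounded up to 4 granules (1024 bytes) */
		if (va)
			gen_pool_free(pool, va, 1000);
	out:
		gen_pool_destroy(pool);
		return va;
	}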
scatterlist.c
    487  unsigned int order, bool chainable,    in sgl_alloc_order() argument
    495  nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);    in sgl_alloc_order()
    497  if (length > (nent << (PAGE_SHIFT + order)))    in sgl_alloc_order()
    514  elem_len = min_t(u64, length, PAGE_SIZE << order);    in sgl_alloc_order()
    515  page = alloc_pages(gfp, order);    in sgl_alloc_order()
    560  void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)    in sgl_free_n_order() argument
    571  __free_pages(page, order);    in sgl_free_n_order()
    582  void sgl_free_order(struct scatterlist *sgl, int order)    in sgl_free_order() argument
    584  sgl_free_n_order(sgl, INT_MAX, order);    in sgl_free_order()
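sgl_alloc_order() (built when CONFIG_SGL_ALLOC is enabled) rounds the requested length up to whole PAGE_SIZE << order chunks and allocates each chunk with alloc_pages(gfp, order); the matching sgl_free_order() must be passed the same order so __free_pages() releases the right number of pages. A minimal, hedged usage sketch (the sgl_demo_* names are made up):

	#include <linux/gfp.h>
	#include <linux/scatterlist.h>

	/* Allocate a scatterlist backing ~1 MiB with order-2 chunks
	 * (16 KiB per element on 4 KiB pages); assumes CONFIG_SGL_ALLOC. */
	static struct scatterlist *sgl_demo_alloc(unsigned int *nents)
	{
		return sgl_alloc_order(1 << 20, 2, false, GFP_KERNEL, nents);
	}

	static void sgl_demo_free(struct scatterlist *sgl)
	{
		sgl_free_order(sgl, 2);	/* must match the allocation order */
	}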
test_meminit.c
     63  static int __init do_alloc_pages_order(int order, int *total_failures)    in do_alloc_pages_order() argument
     67  size_t size = PAGE_SIZE << order;    in do_alloc_pages_order()
     69  page = alloc_pages(GFP_KERNEL, order);    in do_alloc_pages_order()
     72  __free_pages(page, order);    in do_alloc_pages_order()
     74  page = alloc_pages(GFP_KERNEL, order);    in do_alloc_pages_order()
     79  __free_pages(page, order);    in do_alloc_pages_order()
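The pattern here is allocate, free, then reallocate at the same order, scanning PAGE_SIZE << order bytes for data left over from the first allocation (which reveals whether the page allocator initializes memory on allocation). A reduced, hedged sketch of that pattern, not the test's exact code:

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Count bytes of the old poison pattern that survive a free/realloc
	 * cycle at the given order. Simplified illustration only. */
	static size_t leftover_after_realloc(int order)
	{
		size_t size = PAGE_SIZE << order, i, leftover = 0;
		struct page *page = alloc_pages(GFP_KERNEL, order);
		u8 *buf;

		if (!page)
			return 0;
		buf = page_address(page);
		memset(buf, 0xAA, size);		/* poison the first allocation */
		__free_pages(page, order);

		page = alloc_pages(GFP_KERNEL, order);	/* may reuse the same block */
		if (!page)
			return 0;
		buf = page_address(page);
		for (i = 0; i < size; i++)
			if (buf[i] == 0xAA)
				leftover++;
		__free_pages(page, order);
		return leftover;
	}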
bitmap.c
   1032  static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)    in __reg_op() argument
   1047  nbits_reg = 1 << order;    in __reg_op()
   1098  int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)    in bitmap_find_free_region() argument
   1102  for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {    in bitmap_find_free_region()
   1103  if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))    in bitmap_find_free_region()
   1105  __reg_op(bitmap, pos, order, REG_OP_ALLOC);    in bitmap_find_free_region()
   1123  void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)    in bitmap_release_region() argument
   1125  __reg_op(bitmap, pos, order, REG_OP_RELEASE);    in bitmap_release_region()
   1140  int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)    in bitmap_allocate_region() argument
   1142  if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))    in bitmap_allocate_region()
   [all …]
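Here the bitmap is treated as an array of naturally aligned regions of 1 << order bits: bitmap_find_free_region() walks the map in steps of that size, claims the first free region, and returns its starting bit (or -ENOMEM), while bitmap_release_region() hands a region back. The helpers do no locking of their own. A hedged sketch managing eight order-2 (4-bit) slots; the demo_* names are made up:

	#include <linux/bitmap.h>
	#include <linux/errno.h>

	static DECLARE_BITMAP(demo_slots, 32);	/* 32 bits = 8 regions of order 2 */

	/* Claim one aligned 4-bit region; returns its first bit or -ENOMEM. */
	static int demo_get_region(void)
	{
		return bitmap_find_free_region(demo_slots, 32, 2);
	}

	static void demo_put_region(int pos)
	{
		bitmap_release_region(demo_slots, pos, 2);
	}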
xarray.c
    642  unsigned int order = xas->xa_shift;    in xas_create() local
    670  while (shift > order) {    in xas_create()
   1569  unsigned int order = BITS_PER_LONG;    in xa_store_range() local
   1571  order = __ffs(last + 1);    in xa_store_range()
   1572  xas_set_order(&xas, last, order);    in xa_store_range()
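xa_store_range() is the exported entry point behind these lines (available with CONFIG_XARRAY_MULTI): it stores a single entry over every index in [first, last], deriving the largest usable order from the range before calling xas_set_order(). A hedged sketch of a caller:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(demo_xa);

	/* Make indices 64..127 all resolve to 'item'; assumes CONFIG_XARRAY_MULTI. */
	static int demo_store_range(void *item)
	{
		void *ret = xa_store_range(&demo_xa, 64, 127, item, GFP_KERNEL);

		return xa_err(ret);	/* 0 on success, negative errno otherwise */
	}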
Kconfig
    366  values for parameters 'm' (Galois field order) and 't'
    382  Constant value for Galois field order 'm'. If 'k' is the
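For the BCH library these two parameters fix the code geometry: with Galois field order m the codeword is at most 2^m - 1 bits long and correcting t errors costs m*t parity bits, so the standard sizing rule for k protected data bits (which is what this help text goes on to describe) is:

	k + m t \le 2^m - 1

That is, the data plus the parity bits must fit inside one codeword.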
Kconfig.debug
    544  order find ways to optimize the allocator. This should never be
    574  In order to access the kmemleak file, debugfs needs to be
/lib/zlib_inflate/
inflate.c
    332  static const unsigned short order[19] =    /* permutation of code lengths */    in zlib_inflate() local
    472  state->lens[order[state->have++]] = (unsigned short)BITS(3);    in zlib_inflate()
    476  state->lens[order[state->have++]] = 0;    in zlib_inflate()
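order[] here is the fixed permutation from RFC 1951 (section 3.2.7): the 3-bit code-length code lengths arrive in this scrambled order, and any of the 19 slots not transmitted default to zero, which is exactly what the two assignments to state->lens[] above implement. A hedged sketch of that step in isolation, with a hypothetical get_bits() standing in for zlib's NEEDBITS()/BITS()/DROPBITS() machinery:

	/* Permutation of code-length code lengths, per RFC 1951. */
	static const unsigned short order[19] = {
		16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
	};

	/* Read 'ncode' 3-bit lengths into lens[]; untransmitted slots stay zero. */
	static void read_code_length_lengths(unsigned short lens[19],
					     unsigned int ncode,
					     unsigned int (*get_bits)(unsigned int))
	{
		unsigned int have;

		for (have = 0; have < ncode; have++)
			lens[order[have]] = (unsigned short)get_bits(3);
		for (; have < 19; have++)
			lens[order[have]] = 0;
	}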