Occurrences of the identifier `size` under lib/, one hit list per file (line number, then the source line; lists are truncated where marked):

/lib/
test_user_copy.c
   42: static bool is_zeroed(void *from, size_t size)
   44:         return memchr_inv(from, 0x0, size) == NULL;
   47: static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
   52:         if (test(size < 2 * PAGE_SIZE, "buffer too small"))
   61:         size = 1024;
   62:         start = PAGE_SIZE - (size / 2);
   67:         zero_start = size / 4;
   68:         zero_end = size - zero_start;
   81:         memset(kmem, 0x0, size);
   84:         for (i = zero_end; i < size; i += 2)
   … (more matches not shown)
test_meminit.c
   29: static int __init count_nonzero_bytes(void *ptr, size_t size)
   34:         for (i = 0; i < size; i++)
   41: static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip)
   46:         WARN_ON(skip > size);
   47:         size -= skip;
   49:         while (size >= sizeof(*p)) {
   52:                 size -= sizeof(*p);
   54:         if (size)
   55:                 memset(&p[i], GARBAGE_BYTE, size);
   58: static void __init fill_with_garbage(void *ptr, size_t size)
   … (more matches not shown)
test_kasan.c
   33:         size_t size = 123;                      /* in kmalloc_oob_right() */
   36:         ptr = kmalloc(size, GFP_KERNEL);
   42:         ptr[size] = 'x';
   49:         size_t size = 15;                       /* in kmalloc_oob_left() */
   52:         ptr = kmalloc(size, GFP_KERNEL);
   65:         size_t size = 4096;                     /* in kmalloc_node_oob_right() */
   68:         ptr = kmalloc_node(size, GFP_KERNEL, 0);
   74:         ptr[size] = 0;
   82:         size_t size = KMALLOC_MAX_CACHE_SIZE + 10;  /* in kmalloc_pagealloc_oob_right() */
   88:         ptr = kmalloc(size, GFP_KERNEL);
   … (more matches not shown)
sort.c
   33: static bool is_aligned(const void *base, size_t size, unsigned char align)
   35:         unsigned char lsbits = (unsigned char)size;
  120: typedef void (*swap_func_t)(void *a, void *b, int size);
  135: static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
  138:                 swap_words_64(a, b, size);
  140:                 swap_words_32(a, b, size);
  142:                 swap_bytes(a, b, size);
  144:                 swap_func(a, b, (int)size);
  178: static size_t parent(size_t i, unsigned int lsbit, size_t size)
  180:         i -= size;
   … (more matches not shown)
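For context, a minimal sketch (not taken from sort.c itself) of calling its public entry point, sort() from <linux/sort.h>; passing a NULL swap_func lets do_swap() above fall back to the built-in word/byte swaps:

#include <linux/kernel.h>
#include <linux/sort.h>

static int cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	return (x > y) - (x < y);	/* avoids subtraction overflow */
}

static void sort_example(void)
{
	int v[] = { 3, 1, 2 };

	/* num = 3 elements, size = sizeof(int) each, default swap */
	sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
}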
find_bit.c
   67: unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
   70:         return _find_next_bit(addr, NULL, size, offset, 0UL);
   76: unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
   79:         return _find_next_bit(addr, NULL, size, offset, ~0UL);
   86:                 const unsigned long *addr2, unsigned long size,  /* in find_next_and_bit() */
   89:         return _find_next_bit(addr1, addr2, size, offset, 0UL);
   98: unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
  102:         for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
  104:                 return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
  107:         return size;
   … (more matches not shown)
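A hedged usage sketch: the hits above are the bitmap search primitives, where `size` is the bitmap length in bits and the return value equals `size` when nothing is found. A typical caller walks set bits like this (the for_each_set_bit() macro wraps the same two calls):

#include <linux/bitops.h>
#include <linux/printk.h>

static void walk_bits(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_first_bit(map, nbits);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		pr_info("bit %lu is set\n", bit);
}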
genalloc.c
   87:         const int size = start + nr;            /* in bitmap_set_ll() */
  100:                 mask_to_set &= BITMAP_LAST_WORD_MASK(size);
  122:         const int size = start + nr;            /* in bitmap_clear_ll() */
  135:                 mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
  183:                 size_t size, int nid, void *owner)  /* in gen_pool_add_owner() */
  186:         int nbits = size >> pool->min_alloc_order;
  196:         chunk->end_addr = virt + size - 1;
  198:         atomic_long_set(&chunk->avail, size);
  275: unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
  290:         if (size == 0)
   … (more matches not shown)
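The hits above are the gen_pool bitmap internals, where sizes are tracked in granules of 2^min_alloc_order bytes. A sketch of the public API, assuming a hypothetical pre-allocated 16 KiB buffer to manage:

#include <linux/errno.h>
#include <linux/genalloc.h>

static int pool_demo(unsigned long buf)	/* virt addr of the buffer */
{
	struct gen_pool *pool;
	unsigned long chunk;

	pool = gen_pool_create(8, -1);		/* min alloc = 2^8 bytes, any NUMA node */
	if (!pool)
		return -ENOMEM;
	if (gen_pool_add(pool, buf, 16384, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}
	chunk = gen_pool_alloc(pool, 512);	/* internally 512 >> min_alloc_order bits */
	if (chunk)
		gen_pool_free(pool, chunk, 512);
	gen_pool_destroy(pool);
	return 0;
}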
decompress_unlz4.c
   43:         long size = in_len;                     /* in unlz4() */
   84:                 size = fill(inp, 4);
   85:                 if (size < 4) {
   95:         size -= 4;
  108:                         size = fill(inp, 4);
  109:                         if (size == 0)
  111:                         if (size < 4) {
  121:                         size -= 4;
  134:                         size -= 4;
  140:                         size = fill(inp, chunksize);
   … (more matches not shown)
seq_buf.c
   30:         return s->len + len <= s->size;         /* in seq_buf_can_fit() */
   61:         WARN_ON(s->size == 0);                  /* in seq_buf_vprintf() */
   63:         if (s->len < s->size) {
   64:                 len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
   65:                 if (s->len + len < s->size) {
  118:         WARN_ON(s->size == 0);                  /* in seq_buf_bprintf() */
  120:         if (s->len < s->size) {
  122:                 if (s->len + ret < s->size) {
  145:         WARN_ON(s->size == 0);                  /* in seq_buf_puts() */
  171:         WARN_ON(s->size == 0);                  /* in seq_buf_putc() */
   … (more matches not shown)
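As the hits show, seq_buf checks len against size before every write so the buffer can never overrun; a small usage sketch (names are illustrative, not from seq_buf.c):

#include <linux/printk.h>
#include <linux/seq_buf.h>

static void seq_buf_demo(void)
{
	char buf[64];
	struct seq_buf s;

	seq_buf_init(&s, buf, sizeof(buf));
	seq_buf_printf(&s, "pid=%d ", 42);
	seq_buf_puts(&s, "done");

	if (!seq_buf_has_overflowed(&s))
		pr_info("%.*s\n", (int)seq_buf_used(&s), buf);
}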
usercopy.c
   50: int check_zeroed_user(const void __user *from, size_t size)
   55:         if (unlikely(size == 0))
   59:         size += align;
   61:         if (!user_access_begin(from, size))
   68:         while (size > sizeof(unsigned long)) {
   73:                 size -= sizeof(unsigned long);
   78:         if (size < sizeof(unsigned long))
   79:                 val &= aligned_byte_mask(size);
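check_zeroed_user() returns 1 if the user range is all zero, 0 if not, or a negative error. A sketch of the usual caller pattern for forward-compatible syscall structs (essentially what copy_struct_from_user() does with an oversized tail; the helper name here is illustrative):

#include <linux/errno.h>
#include <linux/uaccess.h>

static int accept_extended_struct(const void __user *uarg, size_t usize,
				  size_t ksize)
{
	if (usize > ksize) {
		int rest_ok = check_zeroed_user((const char __user *)uarg + ksize,
						usize - ksize);

		if (rest_ok < 0)
			return rest_ok;		/* -EFAULT etc. */
		if (rest_ok == 0)
			return -E2BIG;		/* tail has bits we don't understand */
	}
	return 0;
}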
stackdepot.c
   62:         u32 size;       /* Number of frames in the stack */
   99: static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
  103:                 sizeof(unsigned long) * size;
  130:         stack->size = size;
  134:         memcpy(stack->entries, entries, size * sizeof(unsigned long));
  150: static inline u32 hash_stack(unsigned long *entries, unsigned int size)
  153:                     size * sizeof(unsigned long) / sizeof(u32),
  174:                 unsigned long *entries, int size,   /* in find_stack() */
  181:                     found->size == size &&
  182:                     !stackdepot_memcmp(entries, found->entries, size))
   … (more matches not shown)
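depot_alloc_stack() above is internal; a hedged sketch of the public wrappers built on it (stack_depot_save() and stack_trace_save(), assuming a v5.2+ kernel where both exist), which deduplicate identical stacks via hash_stack() and return a compact handle:

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_caller(void)
{
	unsigned long entries[16];
	unsigned int nr;

	/* capture up to 16 frames of the current stack, skipping none */
	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/* identical stacks hash to the same record and handle */
	return stack_depot_save(entries, nr, GFP_KERNEL);
}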
decompress_unxz.c
  157: #define kmalloc(size, flags) malloc(size)
  159: #define vmalloc(size) malloc(size)
  177: static bool memeq(const void *a, const void *b, size_t size)
  183:         for (i = 0; i < size; ++i)
  192: static void memzero(void *buf, size_t size)
  195:         uint8_t *e = b + size;
  204: void *memmove(void *dest, const void *src, size_t size)
  211:                 for (i = 0; i < size; ++i)
  214:                 i = size;
  252:           long (*fill)(void *dest, unsigned long size),  /* in unxz() */
   … (more matches not shown)
devres.c
   26:                             resource_size_t size,   /* in __devm_ioremap() */
   37:                 addr = ioremap(offset, size);
   40:                 addr = ioremap_nocache(offset, size);
   43:                 addr = ioremap_wc(offset, size);
   65:                            resource_size_t size)    /* in devm_ioremap() */
   67:         return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
   81:                                    resource_size_t size)   /* in devm_ioremap_nocache() */
   83:         return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NC);
   96:                               resource_size_t size)   /* in devm_ioremap_wc() */
   98:         return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
   … (more matches not shown)
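A sketch of typical probe()-time usage: devm_ioremap() ties the mapping's lifetime to the device, so no iounmap() is needed on the error or remove paths (demo_probe and the register write are illustrative):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* unmapped automatically when the device is unbound */
	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	writel(0x1, base);	/* hypothetical enable register at offset 0 */
	return 0;
}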
bucket_locks.c
   19:         unsigned int i, size;                   /* in __alloc_bucket_spinlocks() */
   28:                 size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
   30:                 size = max_size;
   34:         tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
   37:         for (i = 0; i < size; i++) {
   44:         *locks_mask = size - 1;
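Because the lock-array size ends up a power of two, `*locks_mask = size - 1` works as a cheap AND mask when hashing into it. A usage sketch of the public alloc_bucket_spinlocks() wrapper (the demo functions are illustrative):

#include <linux/spinlock.h>

static spinlock_t *locks;
static unsigned int locks_mask;

static int demo_init(void)
{
	/* at most 64 locks, scaled by 2 per possible CPU */
	return alloc_bucket_spinlocks(&locks, &locks_mask, 64, 2, GFP_KERNEL);
}

static void demo_touch_bucket(unsigned int hash)
{
	spin_lock(&locks[hash & locks_mask]);	/* mask works because size is 2^n */
	/* ... operate on the hash bucket ... */
	spin_unlock(&locks[hash & locks_mask]);
}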
logic_pio.c
   42:         if (!new_range || !new_range->fwnode || !new_range->size)  /* in logic_pio_register_range() */
   46:         end = new_range->hw_start + new_range->size;
   57:                 if (start >= range->hw_start + range->size ||
   59:                         mmio_end = range->io_start + range->size;
   66:                         iio_sz += range->size;
   72:         if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
   78:         new_range->size = SZ_64K;
   83:         if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
  146:                 if (in_range(pio, range->io_start, range->size)) {  /* in find_io_range() */
  188:                 resource_size_t addr, resource_size_t size)  /* in logic_pio_trans_hwaddr() */
   … (more matches not shown)
test_static_keys.c
   55: static void invert_keys(struct test_key *keys, int size)
   60:         for (i = 0; i < size; i++) {
   68: static int verify_keys(struct test_key *keys, int size, bool invert)
   73:         for (i = 0; i < size; i++) {
  112:         int size;                               /* in test_key_func() */
  211:         size = ARRAY_SIZE(static_key_tests);
  213:         ret = verify_keys(static_key_tests, size, false);
  217:         invert_keys(static_key_tests, size);
  218:         ret = verify_keys(static_key_tests, size, true);
  222:         invert_keys(static_key_tests, size);
   … (more matches not shown)
kfifo.c
   24: int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
   31:         size = roundup_pow_of_two(size);
   37:         if (size < 2) {
   43:         fifo->data = kmalloc_array(esize, size, gfp_mask);
   49:         fifo->mask = size - 1;
   67:                 unsigned int size, size_t esize)    /* in __kfifo_init() */
   69:         size /= esize;
   71:         if (!is_power_of_2(size))
   72:                 size = rounddown_pow_of_two(size);
   79:         if (size < 2) {
   … (more matches not shown)
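__kfifo_alloc() rounds the requested size up to a power of two so fifo->mask can wrap indices with a single AND. A sketch using the <linux/kfifo.h> wrappers built on it (function name illustrative):

#include <linux/kfifo.h>
#include <linux/printk.h>

static int kfifo_demo(void)
{
	DECLARE_KFIFO_PTR(fifo, int);
	int val = 7, out, ret;

	ret = kfifo_alloc(&fifo, 100, GFP_KERNEL);	/* rounded up to 128 slots */
	if (ret)
		return ret;

	kfifo_put(&fifo, val);
	if (kfifo_get(&fifo, &out))
		pr_info("got %d\n", out);

	kfifo_free(&fifo);
	return 0;
}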
rhashtable.c
   66: static void nested_table_free(union nested_table *ntbl, unsigned int size)
   76:         if (size > len) {
   77:                 size >>= shift;
   79:                         nested_table_free(ntbl + i, size);
   87:         unsigned int size = tbl->size >> tbl->nest;  /* in nested_bucket_table_free() */
   95:                 nested_table_free(ntbl + i, size);
  144:         size_t size;                            /* in nested_bucket_table_alloc() */
  149:         size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
  151:         tbl = kzalloc(size, gfp);
  171:         size_t size;                            /* in bucket_table_alloc() */
   … (more matches not shown)
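The hits above are the resizable/nested bucket-table internals; a minimal sketch of the public API, where objects embed a struct rhash_head and the table grows and shrinks on its own (demo types are illustrative):

#include <linux/rhashtable.h>

struct demo_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params demo_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct demo_obj, key),
	.head_offset = offsetof(struct demo_obj, node),
};

/* call rhashtable_init(ht, &demo_params) once before use */
static int demo_insert(struct rhashtable *ht, struct demo_obj *obj)
{
	/* resizing happens under the hood via bucket_table_alloc() */
	return rhashtable_insert_fast(ht, &obj->node, demo_params);
}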
string_helpers.c
   34: void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
   60:                 size = 0;
   61:         if (size == 0)
   80:                 while (size >> 32) {
   81:                         do_div(size, divisor[units]);
   87:                 size *= blk_size;
   90:                 while (size >= divisor[units]) {
   91:                         remainder = do_div(size, divisor[units]);
   97:                 sf_cap = size;
  114:                 size += 1;
   … (more matches not shown)
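string_get_size() scales size (times blk_size) down by the unit divisor (1000 for SI units, 1024 for binary) until it fits, then renders it into a caller buffer. A usage sketch (show_capacity is illustrative):

#include <linux/printk.h>
#include <linux/string_helpers.h>

static void show_capacity(u64 sectors)
{
	char buf[16];

	/* size counted in 512-byte blocks, printed with decimal (SI) units */
	string_get_size(sectors, 512, STRING_UNITS_10, buf, sizeof(buf));
	pr_info("capacity: %s\n", buf);		/* e.g. "4.00 GB" */
}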
sg_split.c
   26:         size_t size = sizes[0], len;            /* in sg_calculate_split() */
   42:                 len = min_t(size_t, size, sglen - skip);
   47:                 size -= len;
   51:                 while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
   53:                         size = *(++sizes);
   55:                         len = min_t(size_t, size, sglen - skip);
   61:                         size -= len;
   65:                 if (!size && --nb_splits > 0) {
   67:                         size = *(++sizes);
   74:         return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
iommu-helper.c
    9: unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
   17:         size -= 1;
   19:         index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
   20:         if (index < size) {
/lib/mpi/ |
mpih-mul.c
   21: #define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
   23:                 if ((size) < KARATSUBA_THRESHOLD) \
   24:                         mul_n_basecase(prodp, up, vp, size); \
   26:                         mul_n(prodp, up, vp, size, tspace); \
   29: #define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \
   31:                 if ((size) < KARATSUBA_THRESHOLD) \
   32:                         mpih_sqr_n_basecase(prodp, up, size); \
   34:                         mpih_sqr_n(prodp, up, size, tspace); \
   55: mul_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
   66:         MPN_COPY(prodp, up, size);
   … (more matches not shown)
/lib/xz/ |
xz_dec_bcj.c
   60:         size_t size;                            /* struct member */
   88: static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  103:         if (size <= 4)
  106:         size -= 4;
  107:         for (i = 0; i < size; ++i) {
  160: static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  165:         for (i = 0; i + 4 <= size; i += 4) {
  181: static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  222:         for (i = 0; i + 16 <= size; i += 16) {
  265: static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
   … (more matches not shown)
xz_dec_stream.c
   75:         uint32_t size;                          /* struct member */
  106:         vli_type size;                          /* struct member */
  127:         size_t size;                            /* struct member */
  160:                 b->in_size - b->in_pos, s->temp.size - s->temp.pos);  /* in fill_temp() */
  166:         if (s->temp.pos == s->temp.size) {
  259:                 s->block.hash.unpadded += s->block_header.size  /* in dec_block() */
  284:         s->index.size += in_used;               /* in index_update() */
  435:         if ((s->index.size >> 2) != get_le32(s->temp.buf + 4))  /* in dec_stream_footer() */
  457:         s->temp.size -= 4;                      /* in dec_block_header() */
  458:         if (xz_crc32(s->temp.buf, s->temp.size, 0)
   … (more matches not shown)
/lib/zstd/ |
zstd_common.c
   28: #define stack_push(stack, size) \
   31:                 (stack)->ptr = (char *)ptr + (size); \
   51: void *ZSTD_stackAllocAll(void *opaque, size_t *size)
   54:         *size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr);
   55:         return stack_push(stack, *size);
   58: void *ZSTD_stackAlloc(void *opaque, size_t size)
   61:         return stack_push(stack, size);
   69: void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.o…
/lib/livepatch/
test_klp_shadow_vars.c
   71: static void *shadow_alloc(void *obj, unsigned long id, size_t size,
   75:         void *ret = klp_shadow_alloc(obj, id, size, gfp_flags, ctor,
   78:                   __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
   83: static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
   87:         void *ret = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor,
   90:                   __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
  131:         size_t size = sizeof(int *);            /* in test_klp_shadow_vars_init() */
  156:         sv1 = shadow_alloc(obj, id, size, gfp_flags, shadow_ctor, &var1);
  160:         sv2 = shadow_alloc(obj + 1, id, size, gfp_flags, shadow_ctor, &var2);
  164:         sv3 = shadow_alloc(obj, id + 1, size, gfp_flags, shadow_ctor, &var3);
   … (more matches not shown)
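The test drives the livepatch shadow-variable API, which attaches a <obj, id>-keyed blob of the given size to an existing object. A hedged sketch of the underlying calls (the id, seed value, and function names are illustrative):

#include <linux/livepatch.h>
#include <linux/printk.h>

/* ctor seeds the new "field" when the shadow variable is first created */
static int demo_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	*(int *)shadow_data = *(int *)ctor_data;
	return 0;
}

static void attach_shadow(void *obj)
{
	int seed = 42;
	int *sv;

	sv = klp_shadow_get_or_alloc(obj, 1UL, sizeof(int), GFP_KERNEL,
				     demo_ctor, &seed);
	if (sv)
		pr_info("shadow <%p, 1> = %d\n", obj, *sv);

	klp_shadow_free(obj, 1UL, NULL);	/* no destructor needed here */
}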