| /lib/zlib_inflate/ |
| D | inftrees.c |
    28:  unsigned min, max; /* minimum and maximum code lengths */   in zlib_inflate_table() [local]
    112: for (min = 1; min < MAXBITS; min++)   in zlib_inflate_table()
    113: if (count[min] != 0) break;   in zlib_inflate_table()
    114: if (root < min) root = min;   in zlib_inflate_table()
    189: len = min; /* starting code length */   in zlib_inflate_table()
    221: min = fill; /* save offset to next table */   in zlib_inflate_table()
    252: next += min; /* here min is 1 << curr */   in zlib_inflate_table()
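
The inftrees.c hits show zlib_inflate_table() scanning the code-length histogram count[] for the shortest code length in use (and, symmetrically, the longest) before building its decode tables. As a point of reference only (not zlib's own table builder), the sketch below finds min/max from such a histogram and derives the first canonical code of each length with the shortest-to-longest recurrence from RFC 1951:

```c
/*
 * Minimal userspace sketch (not zlib's code): given a histogram count[len]
 * of how many symbols use each code length, find the shortest and longest
 * used lengths and compute the first canonical code of each length.
 */
#include <stdio.h>

#define MAXBITS 15

int main(void)
{
	/* Example histogram: 1 code of length 2, 2 of length 3, 3 of length 4. */
	unsigned count[MAXBITS + 1] = { 0, 0, 1, 2, 3 };
	unsigned first[MAXBITS + 1] = { 0 };
	unsigned min, max, len, code = 0;

	for (min = 1; min < MAXBITS; min++)	/* shortest used length */
		if (count[min] != 0)
			break;
	for (max = MAXBITS; max > 1; max--)	/* longest used length */
		if (count[max] != 0)
			break;

	for (len = 1; len <= max; len++) {	/* first canonical code per length */
		code = (code + count[len - 1]) << 1;
		first[len] = code;
	}

	printf("min len %u, max len %u\n", min, max);
	for (len = min; len <= max; len++)
		printf("length %u: %u codes, first code %u\n", len, count[len], first[len]);
	return 0;
}
```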
|
| /lib/ |
| D | linear_ranges.c |
    72:  return r->min + (r->max_sel - r->min_sel) * r->step;   in linear_range_get_max_value()
    93:  *val = r->min + (selector - r->min_sel) * r->step;   in linear_range_get_value()
    144: if (r->min > val)   in linear_range_get_selector_low()
    157: *selector = (val - r->min) / r->step + r->min_sel;   in linear_range_get_selector_low()
    228: if (r->min > val) {   in linear_range_get_selector_high()
    238: *selector = DIV_ROUND_UP(val - r->min, r->step) + r->min_sel;   in linear_range_get_selector_high()
    258: if (r->min > val) {   in linear_range_get_selector_within()
    271: *selector = (val - r->min) / r->step + r->min_sel;   in linear_range_get_selector_within()
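
The linear_ranges.c hits spell out the mapping the helpers implement: a selector translates to r->min + (selector - r->min_sel) * r->step, and the reverse lookup rounds down with plain division or up with DIV_ROUND_UP. Below is a standalone re-implementation of just that arithmetic; the struct and field names mirror the snippets, but it is not the kernel's linear_range API and it omits the range checks the real helpers perform:

```c
/*
 * Standalone sketch of the selector <-> value arithmetic shown above.
 * Bounds checks (e.g. the r->min > val tests) are left out for brevity.
 */
#include <stdio.h>

struct linear_range {
	unsigned int min;	/* value at the lowest selector */
	unsigned int min_sel;	/* lowest selector */
	unsigned int max_sel;	/* highest selector */
	unsigned int step;	/* value increment per selector step */
};

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int range_value(const struct linear_range *r, unsigned int sel)
{
	return r->min + (sel - r->min_sel) * r->step;
}

/* Highest selector whose value does not exceed val (rounds down). */
static unsigned int range_selector_low(const struct linear_range *r, unsigned int val)
{
	return (val - r->min) / r->step + r->min_sel;
}

/* Lowest selector whose value is at least val (rounds up). */
static unsigned int range_selector_high(const struct linear_range *r, unsigned int val)
{
	return DIV_ROUND_UP(val - r->min, r->step) + r->min_sel;
}

int main(void)
{
	struct linear_range r = { .min = 1000, .min_sel = 4, .max_sel = 12, .step = 250 };

	printf("selector 6 -> %u\n", range_value(&r, 6));		/* 1500 */
	printf("1600 rounds down to selector %u\n", range_selector_low(&r, 1600));	/* 6 */
	printf("1600 rounds up to selector %u\n", range_selector_high(&r, 1600));	/* 7 */
	return 0;
}
```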
|
| D | maple_tree.c |
    378:  return !mas->min && mas->max == ULONG_MAX;   in mas_is_root_limits()
    722:  return mas->min;   in mas_safe_min()
    1018: mas->min = pivots[mas->offset - 1] + 1;   in mas_descend()
    1058: unsigned long min, max;   in mas_ascend() [local]
    1084: mas->min = 0;   in mas_ascend()
    1088: min = 0;   in mas_ascend()
    1091: min = mas->min;   in mas_ascend()
    1111: min = pivots[a_slot - 1] + 1;   in mas_ascend()
    1128: mas->min = min;   in mas_ascend()
    1262: max_req = min(requested, max_req);   in mas_alloc_nodes()
    [all …]
|
| D | nlattr.c |
    123: (pt->min < 0 || pt->max < 0));   in nla_get_range_unsigned()
    125: range->min = 0;   in nla_get_range_unsigned()
    153: range->min = pt->min;   in nla_get_range_unsigned()
    160: range->min = pt->min;   in nla_get_range_unsigned()
    226: if (value < range.min || value > range.max) {   in nla_validate_range_unsigned()
    247: range->min = S8_MIN;   in nla_get_range_signed()
    251: range->min = S16_MIN;   in nla_get_range_signed()
    255: range->min = S32_MIN;   in nla_get_range_signed()
    260: range->min = S64_MIN;   in nla_get_range_signed()
    270: range->min = pt->min;   in nla_get_range_signed()
    [all …]
|
| D | idr.c |
    380: int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,   in ida_alloc_range() [argument]
    383: XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);   in ida_alloc_range()
    384: unsigned bit = min % IDA_BITMAP_BITS;   in ida_alloc_range()
    388: if ((int)min < 0)   in ida_alloc_range()
    398: if (xas.xa_index > min / IDA_BITMAP_BITS)   in ida_alloc_range()
    455: xas.xa_index = min / IDA_BITMAP_BITS;   in ida_alloc_range()
    456: bit = min % IDA_BITMAP_BITS;   in ida_alloc_range()
    469: xas_set(&xas, min / IDA_BITMAP_BITS);   in ida_alloc_range()
    470: bit = min % IDA_BITMAP_BITS;   in ida_alloc_range()
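
In ida_alloc_range(), the requested minimum is split into an XArray index (min / IDA_BITMAP_BITS) and a starting bit within that bitmap (min % IDA_BITMAP_BITS). A toy illustration of that index/offset split over a flat array of bitmap words follows; the chunk size and the flat array are illustrative stand-ins, not the kernel's IDA/XArray layout:

```c
/*
 * Toy sketch of the index/offset split shown above: IDs are grouped into
 * fixed-size bitmaps, and a requested minimum is split into a bitmap index
 * and a starting bit. Flat array only; not the kernel's IDA.
 */
#include <stdio.h>
#include <string.h>

#define BITMAP_BITS	64		/* illustrative chunk size */
#define NCHUNKS		16

static unsigned long long chunks[NCHUNKS];	/* one 64-bit bitmap per chunk */

/* Allocate the lowest free ID >= min, or -1 if the toy space is full. */
static int toy_alloc_min(unsigned int min)
{
	unsigned int index = min / BITMAP_BITS;
	unsigned int bit = min % BITMAP_BITS;

	for (; index < NCHUNKS; index++, bit = 0) {
		for (; bit < BITMAP_BITS; bit++) {
			if (!(chunks[index] & (1ULL << bit))) {
				chunks[index] |= 1ULL << bit;
				return index * BITMAP_BITS + bit;
			}
		}
	}
	return -1;
}

int main(void)
{
	memset(chunks, 0, sizeof(chunks));
	printf("%d\n", toy_alloc_min(100));	/* 100 */
	printf("%d\n", toy_alloc_min(100));	/* 101: bit for 100 already taken */
	printf("%d\n", toy_alloc_min(0));	/* 0 */
	return 0;
}
```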
|
| D | iov_iter.c |
    93:  size_t n = min(size, iov_iter_count(i));   in fault_in_iov_iter_readable()
    97:  size_t count = min(size, iov_iter_count(i));   in fault_in_iov_iter_readable()
    103: size_t len = min(count, p->iov_len - skip);   in fault_in_iov_iter_readable()
    136: size_t n = min(size, iov_iter_count(i));   in fault_in_iov_iter_writeable()
    140: size_t count = min(size, iov_iter_count(i));   in fault_in_iov_iter_writeable()
    146: size_t len = min(count, p->iov_len - skip);   in fault_in_iov_iter_writeable()
    361: size_t n = min(bytes, (size_t)PAGE_SIZE - offset);   in copy_page_to_iter()
    391: size_t n = min(bytes, (size_t)PAGE_SIZE - offset);   in copy_page_to_iter_nofault()
    421: size_t n = min(bytes, (size_t)PAGE_SIZE - offset);   in copy_page_from_iter()
    668: return min(i->count, iter_iov(i)->iov_len - i->iov_offset);   in iov_iter_single_seg_count()
    [all …]
|
| D | kfifo.c |
    103: l = min(len, size - off);   in kfifo_copy_in()
    142: l = min(len, size - off);   in kfifo_copy_out()
    204: l = min(len, size - off);   in kfifo_copy_from_user()
    264: l = min(len, size - off);   in kfifo_copy_to_user()
    342: len_to_end = min(len, size - off);   in setup_sgl()
    481: return min(n, __kfifo_peek_n(fifo, recsize));   in __kfifo_out_linear_r()
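
Each kfifo copy helper clamps its first chunk with l = min(len, size - off): it copies up to the end of the ring buffer, then wraps to the start for whatever remains. A userspace sketch of that two-memcpy pattern (the real kfifo additionally handles element sizes, record headers and memory barriers):

```c
/*
 * Sketch of the wrap-around split used by the kfifo copy helpers:
 * the first memcpy runs to the end of the buffer, the second wraps to
 * the start. Plain userspace code, not the kernel's kfifo.
 */
#include <stdio.h>
#include <string.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

static void ring_copy_in(char *ring, size_t size, size_t off,
			 const char *src, size_t len)
{
	size_t l = min(len, size - off);	/* bytes until the end of the ring */

	memcpy(ring + off, src, l);
	memcpy(ring, src + l, len - l);		/* wrapped remainder (may be 0) */
}

int main(void)
{
	char ring[8];

	memset(ring, '.', sizeof(ring));
	ring_copy_in(ring, sizeof(ring), 6, "ABCD", 4);	/* wraps after 2 bytes */
	printf("%.8s\n", ring);				/* CD....AB */
	return 0;
}
```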
|
| D | test_xarray.c |
    588:  unsigned long min = index & ~((1UL << order) - 1);   in check_multi_store_1() [local]
    589:  unsigned long max = min + (1UL << order);   in check_multi_store_1()
    592:  XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));   in check_multi_store_1()
    595:  XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);   in check_multi_store_1()
    598:  XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));   in check_multi_store_1()
    600:  XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));   in check_multi_store_1()
    601:  XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));   in check_multi_store_1()
    603:  XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);   in check_multi_store_1()
    605:  xa_erase_index(xa, min);   in check_multi_store_1()
    1135: unsigned int min = 1 << i;   in check_store_iter() [local]
    [all …]
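
check_multi_store_1() derives the range a multi-order entry covers: min = index & ~((1UL << order) - 1) rounds the index down to a 2^order-aligned boundary and max = min + (1UL << order) is one past its end. A tiny standalone illustration of that alignment arithmetic:

```c
/*
 * Sketch of the power-of-two alignment arithmetic shown above: round an
 * index down to the start of its 2^order aligned block and compute the
 * exclusive end of that block.
 */
#include <stdio.h>

int main(void)
{
	unsigned long index = 0x12345;
	unsigned int order = 4;				/* block of 16 indices */
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	printf("index 0x%lx lies in [0x%lx, 0x%lx)\n", index, min, max);
	/* index 0x12345 lies in [0x12340, 0x12350) */
	return 0;
}
```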
|
| D | find_bit.c |
    35: sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(val)), sz); \
    66: sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz); \
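
The find_bit.c macro clamps its result with min(): once a non-zero word is found, idx * BITS_PER_LONG + __ffs(word) may point past the requested size, so it is capped at sz. A userspace sketch of the same scan, with __builtin_ctzl standing in for __ffs() (this is not lib/find_bit.c itself):

```c
/*
 * Sketch of a find-first-set-bit scan over an array of words, clamped to
 * the bitmap size exactly like the macro above.
 */
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define min(a, b)	((a) < (b) ? (a) : (b))

static unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
	unsigned long idx;

	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
		if (addr[idx])
			return min(idx * BITS_PER_LONG + __builtin_ctzl(addr[idx]), size);
	}
	return size;	/* no bit set: convention is to return the size */
}

int main(void)
{
	unsigned long bits[2] = { 0, 1UL << 5 };	/* bit 69 on a 64-bit long */

	printf("%lu\n", find_first_bit(bits, 128));	/* 69 */
	printf("%lu\n", find_first_bit(bits, 64));	/* 64: nothing set in range */
	return 0;
}
```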
|
| D | kstrtox.c |
    400: count = min(count, sizeof(buf) - 1);   in kstrtobool_from_user()
    414: count = min(count, sizeof(buf) - 1); \
|
| D | seq_buf.c |
    277: start_len = min(len, MAX_MEMHEX_BYTES);
    415: linelen = min(remaining, rowsize);
|
| D | xarray.c |
    1856: xas.xa_index = limit.min;   in __xa_alloc()
    1897: u32 min = limit.min;   in __xa_alloc_cyclic() [local]
    1900: limit.min = max(min, *next);   in __xa_alloc_cyclic()
    1907: if (ret < 0 && limit.min > min) {   in __xa_alloc_cyclic()
    1908: limit.min = min;   in __xa_alloc_cyclic()
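
__xa_alloc_cyclic() raises limit.min to max(min, *next) so allocation resumes after the last ID handed out, and if that attempt fails while the lower bound really was raised, it restores limit.min and retries from the original minimum. A toy sketch of that wrap-around retry over a trivial bitmap allocator (the allocator is a stand-in; the real code allocates in the XArray and has its own *next and error semantics):

```c
/*
 * Toy sketch of the cyclic-allocation retry visible above: first try
 * [next, max], and only if that fails (and the lower bound was really
 * raised) wrap around and retry [min, max].
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_ID 8
static bool used[MAX_ID + 1];

static int alloc_in_range(unsigned int lo, unsigned int hi)
{
	for (unsigned int id = lo; id <= hi && id <= MAX_ID; id++) {
		if (!used[id]) {
			used[id] = true;
			return id;
		}
	}
	return -1;
}

static int alloc_cyclic(unsigned int min, unsigned int max, unsigned int *next)
{
	unsigned int lo = *next > min ? *next : min;	/* limit.min = max(min, *next) */
	int id = alloc_in_range(lo, max);

	if (id < 0 && lo > min)			/* wrap: retry from the original minimum */
		id = alloc_in_range(min, max);
	if (id >= 0)
		*next = id + 1;			/* continue after the id just returned */
	return id;
}

int main(void)
{
	unsigned int next = 0;

	for (int i = 0; i < 10; i++)
		printf("%d ", alloc_cyclic(1, 4, &next));
	printf("\n");	/* 1 2 3 4 then -1s until something is freed */
	return 0;
}
```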
|
| D | objagg.c |
    228: unsigned int min, max;   in objagg_obj_root_id_alloc() [local]
    238: min = hnode->root_id;   in objagg_obj_root_id_alloc()
    244: min = objagg->hints->root_count;   in objagg_obj_root_id_alloc()
    248: root_id = ida_alloc_range(&objagg->root_ida, min, max, GFP_KERNEL);   in objagg_obj_root_id_alloc()
|
| D | hexdump.c | 273 linelen = min(remaining, rowsize); in print_hex_dump()
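
print_hex_dump() walks the buffer one row at a time and clamps the final row with min(remaining, rowsize). A minimal userspace hex dump loop using the same clamp (not the kernel helper):

```c
/*
 * Minimal userspace hex dump loop using the same min(remaining, rowsize)
 * clamp for the final partial row.
 */
#include <stdio.h>
#include <string.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

static void hex_dump(const void *buf, size_t len, size_t rowsize)
{
	const unsigned char *p = buf;

	for (size_t i = 0; i < len; i += rowsize) {
		size_t remaining = len - i;
		size_t linelen = min(remaining, rowsize);	/* last row may be short */

		printf("%04zx:", i);
		for (size_t j = 0; j < linelen; j++)
			printf(" %02x", p[i + j]);
		printf("\n");
	}
}

int main(void)
{
	const char msg[] = "hexdump example";

	hex_dump(msg, strlen(msg), 8);
	return 0;
}
```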
|
| D | test_hmm.c |
    349:  range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);   in dmirror_fault()
    802:  next = min(end, addr + (ARRAY_SIZE(pages) << PAGE_SHIFT));   in dmirror_exclusive()
    927:  next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));   in dmirror_migrate_to_system()
    987:  next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));   in dmirror_migrate_to_device()
    1195: next = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);   in dmirror_snapshot()
|
| /lib/crypto/ |
| D | aescfb.c |
    50: crypto_xor_cpy(dst, src, ks, min(len, AES_BLOCK_SIZE));   in aescfb_encrypt()
    87: crypto_xor_cpy(dst, src, ks[i], min(len, AES_BLOCK_SIZE));   in aescfb_decrypt()
|
| D | poly1305.c | 36 bytes = min(nbytes, POLY1305_BLOCK_SIZE - desc->buflen); in poly1305_update_generic()
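
poly1305_update_generic() buffers partial blocks: it copies at most enough bytes to complete the pending block, processes full blocks straight from the source, and stashes the tail for the next call. A generic sketch of that buffering pattern with a stub in place of the actual Poly1305 block function:

```c
/*
 * Sketch of the block-buffering pattern in an update() routine: top up the
 * pending partial block first, then consume whole blocks, then stash the
 * tail. process_block() is a stub; the real code runs the Poly1305 rounds.
 */
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16
#define min(a, b) ((a) < (b) ? (a) : (b))

struct ctx {
	unsigned char buf[BLOCK_SIZE];
	size_t buflen;				/* bytes currently buffered */
	unsigned int blocks_processed;
};

static void process_block(struct ctx *c, const unsigned char *block)
{
	c->blocks_processed++;			/* stand-in for the real per-block work */
}

static void update(struct ctx *c, const unsigned char *src, size_t nbytes)
{
	if (c->buflen) {
		size_t bytes = min(nbytes, BLOCK_SIZE - c->buflen);

		memcpy(c->buf + c->buflen, src, bytes);
		c->buflen += bytes;
		src += bytes;
		nbytes -= bytes;
		if (c->buflen == BLOCK_SIZE) {
			process_block(c, c->buf);
			c->buflen = 0;
		}
	}
	while (nbytes >= BLOCK_SIZE) {		/* whole blocks straight from src */
		process_block(c, src);
		src += BLOCK_SIZE;
		nbytes -= BLOCK_SIZE;
	}
	if (nbytes) {				/* keep the tail for the next call */
		memcpy(c->buf, src, nbytes);
		c->buflen = nbytes;
	}
}

int main(void)
{
	struct ctx c = { .buflen = 0, .blocks_processed = 0 };
	unsigned char data[40] = { 0 };

	update(&c, data, 10);
	update(&c, data, 30);			/* completes 2 blocks, leaves 8 buffered */
	printf("blocks %u, buffered %zu\n", c.blocks_processed, c.buflen);
	return 0;
}
```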
|
| D | aesgcm.c |
    68:  crypto_xor((u8 *)ghash, src, min(len, GHASH_BLOCK_SIZE));   in aesgcm_ghash()
    124: crypto_xor_cpy(dst, src, buf, min(len, AES_BLOCK_SIZE));   in aesgcm_crypt()
|
| /lib/test_fortify/ |
| D | Makefile | 23 always-$(call gcc-min-version, 80000) += test_fortify.log
|
| /lib/math/ |
| D | rational.c | 88 t = min(t, (max_numerator - n0) / n1); in rational_best_approximation()
|
| D | reciprocal_div.c | 28 R.sh1 = min(l, 1); in reciprocal_value()
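
reciprocal_value() precomputes a multiplier and two shift counts so later divisions by a fixed u32 can be done with a multiply and shifts (the Granlund-Montgomery method); min(l, 1) caps the first shift at one. The sketch below re-derives the same precomputation and the matching divide in userspace and checks it against ordinary division; the field names mirror the kernel struct, but this is a re-derivation, not a copy of lib/reciprocal_div.c:

```c
/*
 * Userspace sketch of division by a runtime-invariant u32 via multiply and
 * shift, the technique behind reciprocal_value()/reciprocal_divide().
 */
#include <stdio.h>
#include <stdint.h>

struct reciprocal_value {
	uint32_t m;
	uint8_t sh1, sh2;
};

static struct reciprocal_value reciprocal_value(uint32_t d)
{
	struct reciprocal_value R;
	int l = 0;

	while ((1ULL << l) < d)			/* l = ceil(log2(d)) */
		l++;
	R.m = (uint32_t)(((1ULL << 32) * ((1ULL << l) - d)) / d + 1);
	R.sh1 = l < 1 ? l : 1;			/* min(l, 1) */
	R.sh2 = l - 1 < 0 ? 0 : l - 1;		/* max(l - 1, 0) */
	return R;
}

static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
{
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

int main(void)
{
	uint32_t divisors[] = { 1, 3, 7, 10, 1000, 4294967295u };
	uint32_t values[] = { 0, 1, 9, 12345, 4294967295u };

	for (unsigned i = 0; i < sizeof(divisors) / sizeof(divisors[0]); i++) {
		struct reciprocal_value R = reciprocal_value(divisors[i]);

		for (unsigned j = 0; j < sizeof(values) / sizeof(values[0]); j++)
			if (reciprocal_divide(values[j], R) != values[j] / divisors[i])
				printf("mismatch: %u / %u\n", values[j], divisors[i]);
	}
	printf("done\n");	/* no mismatch lines expected */
	return 0;
}
```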
|
| /lib/zstd/compress/ |
| D | huf_compress.c |
    256: { U16 min = 0;   in HUF_readCTable() [local]
    258: valPerRank[n] = min; /* get starting value within each rank */   in HUF_readCTable()
    259: min += nbPerRank[n];   in HUF_readCTable()
    260: min >>= 1;   in HUF_readCTable()
    664: { U16 min = 0;   in HUF_buildCTableFromTree() [local]
    666: valPerRank[n] = min; /* get starting value within each rank */   in HUF_buildCTableFromTree()
    667: min += nbPerRank[n];   in HUF_buildCTableFromTree()
    668: min >>= 1;   in HUF_buildCTableFromTree()
|
| /lib/zstd/common/ |
| D | zstd_internal.h | 51 #define BOUNDED(min,val,max) (MAX(min,MIN(val,max))) argument
|
| /lib/lz4/ |
| D | lz4_decompress.c |
    342: length = min(length, (size_t)(oend - op));   in LZ4_decompress_generic()
    388: size_t const mlen = min(length, (size_t)(oend - op));   in LZ4_decompress_generic()
    472: dstCapacity = min(targetOutputSize, dstCapacity);   in LZ4_decompress_safe_partial()
|
| /lib/xz/ |
| D | xz_dec_test.c | 123 buffers.in_size = min(remaining, sizeof(buffer_in)); in xz_dec_test_write()
|