/lib/
D | radix-tree.c |
     86  unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;  in radix_tree_descend() (local)
     87  void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);  in radix_tree_descend()
     90  return offset;  in radix_tree_descend()
     99  int offset)  in tag_set() (argument)
    101  __set_bit(offset, node->tags[tag]);  in tag_set()
    105  int offset)  in tag_clear() (argument)
    107  __clear_bit(offset, node->tags[tag]);  in tag_clear()
    111  int offset)  in tag_get() (argument)
    113  return test_bit(offset, node->tags[tag]);  in tag_get()
    179  unsigned long offset)  in radix_tree_find_next_bit() (argument)
    [all …]
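These are internal helpers of the classic radix tree: offset is a slot index derived from the lookup index, and the tag helpers set per-slot bits. For orientation, a minimal sketch of the public API from <linux/radix-tree.h>; the demo() wrapper, index, and item pointer are illustrative, and real insertions also need the caller's locking (and usually radix_tree_preload()):

    #include <linux/radix-tree.h>

    RADIX_TREE(my_tree, GFP_KERNEL);        /* declares and initializes a root */

    static int demo(void *item)
    {
            int err = radix_tree_insert(&my_tree, 42, item);

            if (err)
                    return err;
            WARN_ON(radix_tree_lookup(&my_tree, 42) != item);
            radix_tree_delete(&my_tree, 42);
            return 0;
    }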
D | generic-radix-tree.c |
     54  void *__genradix_ptr(struct __genradix *radix, size_t offset)  in __genradix_ptr() (argument)
     60  if (ilog2(offset) >= genradix_depth_shift(level))  in __genradix_ptr()
     71  n = n->children[offset >> genradix_depth_shift(level)];  in __genradix_ptr()
     72  offset &= genradix_depth_size(level) - 1;  in __genradix_ptr()
     75  return &n->data[offset];  in __genradix_ptr()
    104  void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,  in __genradix_ptr_alloc() (argument)
    118  if (n && ilog2(offset) < genradix_depth_shift(level))  in __genradix_ptr_alloc()
    139  &n->children[offset >> genradix_depth_shift(level)];  in __genradix_ptr_alloc()
    140  offset &= genradix_depth_size(level) - 1;  in __genradix_ptr_alloc()
    158  return &n->data[offset];  in __genradix_ptr_alloc()
    [all …]
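__genradix_ptr() walks the tree by byte offset; normal users go through the typed macros in <linux/generic-radix-tree.h>, which turn an element index into that byte offset. A minimal sketch, with struct foo, the index, and demo() made up for illustration:

    #include <linux/generic-radix-tree.h>

    struct foo { int x; };
    static GENRADIX(struct foo) fr;

    static int demo(void)
    {
            struct foo *p;

            genradix_init(&fr);
            p = genradix_ptr_alloc(&fr, 1000, GFP_KERNEL); /* allocates the path */
            if (!p)
                    return -ENOMEM;
            p->x = 42;
            WARN_ON(genradix_ptr(&fr, 1000) != p); /* NULL if never allocated */
            genradix_free(&fr);
            return 0;
    }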
D | maple_tree.c |
    734  mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)  in mas_safe_min() (argument)
    736  if (likely(offset))  in mas_safe_min()
    737  return pivots[offset - 1] + 1;  in mas_safe_min()
    756  unsigned char offset, enum maple_type type)  in mas_logical_pivot() (argument)
    758  unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);  in mas_logical_pivot()
    763  if (likely(offset))  in mas_logical_pivot()
    825  void __rcu **slots, unsigned char offset)  in mt_slot() (argument)
    827  return rcu_dereference_check(slots[offset], mt_locked(mt));  in mt_slot()
    831  unsigned char offset)  in mt_slot_locked() (argument)
    833  return rcu_dereference_protected(slots[offset], mt_locked(mt));  in mt_slot_locked()
    [all …]
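These maple tree helpers are node internals: offset is a slot position inside a node, and mas_safe_min() derives a range minimum from the previous pivot. External callers use the mtree_* API instead; a hedged sketch, where demo(), the range, and the entry are illustrative:

    #include <linux/maple_tree.h>

    DEFINE_MTREE(my_mt);

    static int demo(void *entry)
    {
            int err = mtree_store_range(&my_mt, 10, 20, entry, GFP_KERNEL);

            if (err)
                    return err;
            WARN_ON(mtree_load(&my_mt, 15) != entry); /* any index in [10, 20] */
            mtree_erase(&my_mt, 15);                  /* drops the whole range */
            mtree_destroy(&my_mt);
            return 0;
    }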
D | cmdline_kunit.c |
     41  static void cmdline_do_one_test(struct kunit *test, const char *in, int rc, int offset)  in cmdline_do_one_test() (argument)
     51  KUNIT_EXPECT_PTR_EQ_MSG(test, out, in + offset, fmt, in);  in cmdline_do_one_test()
     61  int offset;  in cmdline_test_noint() (local)
     64  offset = !!(*str == '-');  in cmdline_test_noint()
     65  cmdline_do_one_test(test, str, rc, offset);  in cmdline_test_noint()
     77  int offset;  in cmdline_test_lead_int() (local)
     81  offset = strlen(in) - strlen(str) + !!(rc == 2);  in cmdline_test_lead_int()
     82  cmdline_do_one_test(test, in, rc, offset);  in cmdline_test_lead_int()
     95  int offset;  in cmdline_test_tail_int() (local)
    102  offset = rc ? strlen(in) : !!(*str == '-');  in cmdline_test_tail_int()
    [all …]
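The tests drive get_option() from lib/cmdline.c and check how far the parse advanced (the offset). A sketch of the API under test; demo() and the input string are illustrative:

    #include <linux/kernel.h>

    static void demo(void)
    {
            char buf[] = "1-3,5";
            char *str = buf;
            int val;
            int rc = get_option(&str, &val);

            /*
             * rc == 0: no integer parsed
             * rc == 1: integer parsed, end of string
             * rc == 2: integer parsed, ',' follows
             * rc == 3: integer parsed, '-' follows (a range)
             * str has advanced past whatever was consumed, which is
             * exactly the offset the tests above compute.
             */
    }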
D | xarray.c |
     84  unsigned int offset, xa_mark_t mark)  in node_get_mark() (argument)
     86  return test_bit(offset, node_marks(node, mark));  in node_get_mark()
     90  static inline bool node_set_mark(struct xa_node *node, unsigned int offset,  in node_set_mark() (argument)
     93  return __test_and_set_bit(offset, node_marks(node, mark));  in node_set_mark()
     97  static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,  in node_clear_mark() (argument)
    100  return __test_and_clear_bit(offset, node_marks(node, mark));  in node_clear_mark()
    153  static void xas_move_index(struct xa_state *xas, unsigned long offset)  in xas_move_index() (argument)
    157  xas->xa_index += offset << shift;  in xas_move_index()
    203  unsigned int offset = get_offset(xas->xa_index, node);  in xas_descend() (local)
    204  void *entry = xa_entry(xas->xa, node, offset);  in xas_descend()
    [all …]
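node_set_mark() and friends operate on a slot offset within an xa_node and back the public mark API. Typical usage from outside, with demo(), the index, and item as illustrative stand-ins:

    #include <linux/xarray.h>

    DEFINE_XARRAY(my_xa);

    static int demo(void *item)
    {
            void *old = xa_store(&my_xa, 5, item, GFP_KERNEL);

            if (xa_err(old))
                    return xa_err(old);
            xa_set_mark(&my_xa, 5, XA_MARK_0);  /* per-slot mark bit */
            WARN_ON(xa_load(&my_xa, 5) != item);
            WARN_ON(!xa_get_mark(&my_xa, 5, XA_MARK_0));
            xa_erase(&my_xa, 5);
            return 0;
    }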
D | pci_iomap.c |
     30  unsigned long offset,  in pci_iomap_range() (argument)
     37  if (len <= offset || !start)  in pci_iomap_range()
     39  len -= offset;  in pci_iomap_range()
     40  start += offset;  in pci_iomap_range()
     70  unsigned long offset,  in pci_iomap_wc_range() (argument)
     81  if (len <= offset || !start)  in pci_iomap_wc_range()
     84  len -= offset;  in pci_iomap_wc_range()
     85  start += offset;  in pci_iomap_wc_range()
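pci_iomap_range() maps a window of a BAR: the offset is added to the BAR start and subtracted from its length, as lines 37-40 show. A hedged usage sketch; demo(), the BAR number, and the window are examples:

    #include <linux/pci.h>

    static int demo(struct pci_dev *pdev)
    {
            void __iomem *regs;
            u32 val;

            /* Map 0x100 bytes of BAR 0, starting at byte offset 0x1000. */
            regs = pci_iomap_range(pdev, 0, 0x1000, 0x100);
            if (!regs)
                    return -ENOMEM;
            val = ioread32(regs);   /* reads BAR0 + 0x1000 */
            pr_info("reg: %#x\n", val);
            pci_iounmap(pdev, regs);
            return 0;
    }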
D | find_bit.c |
    206  unsigned long size, unsigned long offset)  in find_next_clump8() (argument)
    208  offset = find_next_bit(addr, size, offset);  in find_next_clump8()
    209  if (offset == size)  in find_next_clump8()
    212  offset = round_down(offset, 8);  in find_next_clump8()
    213  *clump = bitmap_get_value8(addr, offset);  in find_next_clump8()
    215  return offset;  in find_next_clump8()
    235  unsigned long size, unsigned long offset)  in _find_next_zero_bit_le() (argument)
    237  return FIND_NEXT_BIT(~addr[idx], swab, size, offset);  in _find_next_zero_bit_le()
    244  unsigned long size, unsigned long offset)  in _find_next_bit_le() (argument)
    246  return FIND_NEXT_BIT(addr[idx], swab, size, offset);  in _find_next_bit_le()
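In each of these, offset is the bit position at which the search resumes, which is what makes repeated scans and the iteration macros cheap. A small sketch (demo() and the bit positions are illustrative):

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    static void demo(void)
    {
            DECLARE_BITMAP(map, 128);
            unsigned long bit;

            bitmap_zero(map, 128);
            __set_bit(3, map);
            __set_bit(70, map);

            bit = find_next_bit(map, 128, 4);  /* resume at offset 4 -> 70 */
            pr_info("next bit from 4: %lu\n", bit);

            for_each_set_bit(bit, map, 128)    /* built on the same primitive */
                    pr_info("bit %lu is set\n", bit);
    }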
D | scatterlist.c |
    443  struct page **pages, unsigned int n_pages, unsigned int offset,  in sg_alloc_append_table_from_pages() (argument)
    465  sgt_append->prv->offset + sgt_append->prv->length) /  in sg_alloc_append_table_from_pages()
    468  if (WARN_ON(offset))  in sg_alloc_append_table_from_pages()
    524  chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;  in sg_alloc_append_table_from_pages()
    526  min_t(unsigned long, size, chunk_size), offset);  in sg_alloc_append_table_from_pages()
    529  offset = 0;  in sg_alloc_append_table_from_pages()
    566  unsigned int n_pages, unsigned int offset,  in sg_alloc_table_from_pages_segment() (argument)
    573  err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,  in sg_alloc_table_from_pages_segment()
    726  return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;  in sg_page_count()
    750  return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;  in sg_dma_page_count()
    [all …]
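Here offset is the byte offset of the data inside the first page, and the append helper refuses a nonzero offset when extending an existing table (line 468). The common wrapper looks like this; demo() and its parameters are placeholders for caller-supplied values:

    #include <linux/scatterlist.h>

    static int demo(struct page **pages, unsigned int n_pages,
                    unsigned int offset, unsigned long size)
    {
            struct sg_table sgt;
            int err;

            /* offset is into the first page; contiguous pages coalesce. */
            err = sg_alloc_table_from_pages(&sgt, pages, n_pages, offset,
                                            size, GFP_KERNEL);
            if (err)
                    return err;
            /* ... dma_map_sgtable(), use, dma_unmap_sgtable() ... */
            sg_free_table(&sgt);
            return 0;
    }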
D | packing.c |
     11  static int get_le_offset(int offset)  in get_le_offset() (argument)
     15  closest_multiple_of_4 = (offset / 4) * 4;  in get_le_offset()
     16  offset -= closest_multiple_of_4;  in get_le_offset()
     17  return closest_multiple_of_4 + (3 - offset);  in get_le_offset()
     20  static int get_reverse_lsw32_offset(int offset, size_t len)  in get_reverse_lsw32_offset() (argument)
     25  word_index = offset / 4;  in get_reverse_lsw32_offset()
     27  offset -= closest_multiple_of_4;  in get_reverse_lsw32_offset()
     29  return word_index * 4 + offset;  in get_reverse_lsw32_offset()
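These helpers translate logical byte offsets for the endianness quirks of lib/packing.c. Callers use the single packing() entry point from <linux/packing.h>; a hedged sketch, with the field bounds, quirk mask, and demo() chosen only for illustration:

    #include <linux/packing.h>

    static int demo(void)
    {
            u8 buf[8] = {};
            u64 val = 0x1234;
            int err;

            /* Pack val into bits 47:32 of the 8-byte buffer ... */
            err = packing(buf, &val, 47, 32, sizeof(buf),
                          PACK, QUIRK_LITTLE_ENDIAN);
            if (err)
                    return err;

            /* ... and read the same field back. */
            val = 0;
            return packing(buf, &val, 47, 32, sizeof(buf),
                           UNPACK, QUIRK_LITTLE_ENDIAN);
    }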
D | iov_iter.c |
     55  unsigned offset = p->bv_offset + skip; \
     58  offset / PAGE_SIZE); \
     59  base = kaddr + offset % PAGE_SIZE; \
     61  (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
     91  size_t offset; \
     98  offset = offset_in_folio(folio, start + __off); \
     99  while (offset < folio_size(folio)) { \
    100  base = kmap_local_folio(folio, offset); \
    109  offset += len; \
    213  if (unlikely(p->offset + p->len != abs(i->last_offset)))  in sanity()
    [all …]
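The offsets here locate the current position inside bvec pages and folios as an iterator advances; consumers never see them. A consumer-side sketch (demo() and the buffers are illustrative; the direction constant is spelled ITER_DEST/ITER_SOURCE on recent kernels and READ/WRITE on older ones):

    #include <linux/uio.h>

    static size_t demo(void *dst, size_t len, const char *src)
    {
            struct kvec kv = { .iov_base = dst, .iov_len = len };
            struct iov_iter iter;

            iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
            return copy_to_iter(src, len, &iter);  /* advances the iterator */
    }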
D | devres.c |
     26  static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,  in __devm_ioremap() (argument)
     39  addr = ioremap(offset, size);  in __devm_ioremap()
     42  addr = ioremap_uc(offset, size);  in __devm_ioremap()
     45  addr = ioremap_wc(offset, size);  in __devm_ioremap()
     48  addr = ioremap_np(offset, size);  in __devm_ioremap()
     69  void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,  in devm_ioremap() (argument)
     72  return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);  in devm_ioremap()
     84  void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,  in devm_ioremap_uc() (argument)
     87  return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);  in devm_ioremap_uc()
     99  void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,  in devm_ioremap_wc() (argument)
    [all …]
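devm_ioremap() and its variants take a physical address (the offset parameter) plus a size, and tear the mapping down automatically on driver detach. A typical probe-path sketch; demo_probe() and the resource lookup are illustrative:

    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!res)
                    return -EINVAL;
            base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
            if (!base)
                    return -ENOMEM;
            /* no iounmap() needed: devres unmaps on detach */
            return 0;
    }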
D | sbitmap.c |
    341  static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)  in emit_byte() (argument)
    343  if ((offset & 0xf) == 0) {  in emit_byte()
    344  if (offset != 0)  in emit_byte()
    346  seq_printf(m, "%08x:", offset);  in emit_byte()
    348  if ((offset & 0x1) == 0)  in emit_byte()
    357  unsigned int offset = 0;  in sbitmap_bitmap_show() (local)
    373  emit_byte(m, offset, byte);  in sbitmap_bitmap_show()
    376  offset++;  in sbitmap_bitmap_show()
    383  emit_byte(m, offset, byte);  in sbitmap_bitmap_show()
    384  offset++;  in sbitmap_bitmap_show()
    [all …]
D | logic_iomem.c |
     72  static void __iomem *real_ioremap(phys_addr_t offset, size_t size)  in real_ioremap() (argument)
     75  (unsigned long long)offset, size);  in real_ioremap()
     86  void __iomem *ioremap(phys_addr_t offset, size_t size)  in ioremap() (argument)
     94  if (rreg->res->start > offset)  in ioremap()
     96  if (rreg->res->end < offset + size - 1)  in ioremap()
    111  offs = rreg->ops->map(offset - found->res->start,  in ioremap()
    131  return real_ioremap(offset, size);  in ioremap()
D | ubsan.c |
    359  unsigned long offset);
    362  unsigned long offset)  in __ubsan_handle_alignment_assumption() (argument)
    372  if (offset)  in __ubsan_handle_alignment_assumption()
    374  align, offset, data->type->type_name);  in __ubsan_handle_alignment_assumption()
    379  real_ptr = ptr - offset;  in __ubsan_handle_alignment_assumption()
    381  offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0),  in __ubsan_handle_alignment_assumption()
D | ts_kmp.c |
     45  unsigned int i, q = 0, text_len, consumed = state->offset;  in kmp_find()
     63  state->offset = consumed + i + 1;  in kmp_find()
     64  return state->offset - kmp->pattern_len;  in kmp_find()
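state->offset is what lets kmp_find() resume matching across successive chunks of input. The algorithm is reached through the generic textsearch API; a minimal sketch for a contiguous buffer, with the pattern and demo() as placeholders:

    #include <linux/textsearch.h>

    static int demo(const void *hay, unsigned int hay_len)
    {
            struct ts_config *conf;
            struct ts_state state;
            unsigned int pos;

            conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
            if (IS_ERR(conf))
                    return PTR_ERR(conf);

            pos = textsearch_find_continuous(conf, &state, hay, hay_len);
            if (pos != UINT_MAX)
                    pr_info("match at offset %u\n", pos);

            textsearch_destroy(conf);
            return 0;
    }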
D | stackdepot.c |
     58  u32 offset : STACK_ALLOC_OFFSET_BITS;  (member)
    151  stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;  in depot_alloc_stack()
    369  size_t offset = parts.offset << STACK_ALLOC_ALIGN;  in stack_depot_fetch() (local)
    384  stack = slab + offset;  in stack_depot_fetch()
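A depot handle packs a slab number and an offset within it, so one 32-bit value names a whole stack trace. A round-trip sketch (demo() and the skip count are illustrative):

    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>

    static void demo(void)
    {
            unsigned long entries[16];
            unsigned long *saved;
            unsigned int nr, saved_nr;
            depot_stack_handle_t handle;

            nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
            handle = stack_depot_save(entries, nr, GFP_KERNEL);
            if (!handle)    /* 0 means the depot could not allocate */
                    return;

            saved_nr = stack_depot_fetch(handle, &saved);
            stack_trace_print(saved, saved_nr, 0);
    }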
/lib/lzo/
D | lzo1x_decompress_safe.c |
     79  size_t offset;  in lzo1x_decompress_safe() (local)
     86  offset = ip - ip_last;  in lzo1x_decompress_safe()
     87  if (unlikely(offset > MAX_255_COUNT))  in lzo1x_decompress_safe()
     90  offset = (offset << 8) - offset;  in lzo1x_decompress_safe()
     91  t += offset + 15 + *ip++;  in lzo1x_decompress_safe()
    147  size_t offset;  in lzo1x_decompress_safe() (local)
    154  offset = ip - ip_last;  in lzo1x_decompress_safe()
    155  if (unlikely(offset > MAX_255_COUNT))  in lzo1x_decompress_safe()
    158  offset = (offset << 8) - offset;  in lzo1x_decompress_safe()
    159  t += offset + 31 + *ip++;  in lzo1x_decompress_safe()
    [all …]
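In these hunks, offset counts the zero marker bytes that extend a length field; each contributes 255, hence the (offset << 8) - offset multiply. From the caller's side the safe decompressor only needs buffers and lengths; a hedged round-trip sketch, with demo() and the sizing as illustrative choices:

    #include <linux/lzo.h>
    #include <linux/slab.h>

    static int demo(const u8 *in, size_t in_len, u8 *out, size_t out_max)
    {
            size_t comp_len = lzo1x_worst_compress(in_len);
            u8 *comp = kmalloc(comp_len, GFP_KERNEL);
            void *wrkmem = kmalloc(LZO1X_1_MEM_COMPRESS, GFP_KERNEL);
            size_t out_len = out_max;  /* in: capacity, out: actual size */
            int ret = -ENOMEM;

            if (!comp || !wrkmem)
                    goto free;
            ret = lzo1x_1_compress(in, in_len, comp, &comp_len, wrkmem);
            if (ret == LZO_E_OK)
                    ret = lzo1x_decompress_safe(comp, comp_len, out, &out_len);
    free:
            kfree(comp);
            kfree(wrkmem);
            return ret;
    }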
/lib/pldmfw/
D | pldmfw.c |
     25  size_t offset;  (member)
     65  pldm_check_fw_space(struct pldmfw_priv *data, size_t offset, size_t length)  in pldm_check_fw_space() (argument)
     67  size_t expected_size = offset + length;  in pldm_check_fw_space()
     95  err = pldm_check_fw_space(data, data->offset, bytes_to_move);  in pldm_move_fw_offset()
     99  data->offset += bytes_to_move;  in pldm_move_fw_offset()
    153  err = pldm_check_fw_space(data, data->offset, header_size);  in pldm_parse_header()
    176  data->offset);  in pldm_parse_header()
    265  desc_start = data->fw->data + data->offset;  in pldm_parse_desc_tlvs()
    348  bitmap_ptr = data->fw->data + data->offset;  in pldm_parse_one_record()
    364  record->version_string = data->fw->data + data->offset;  in pldm_parse_one_record()
    [all …]
/lib/zstd/compress/
D | zstd_ldm.c |
    158  unsigned const offset = *pOffset;  in ZSTD_ldm_insertEntry() (local)
    160  *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;  in ZSTD_ldm_insertEntry()
    161  *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));  in ZSTD_ldm_insertEntry()
    269  entry.offset = (U32)(split - base);  in ZSTD_ldm_fillHashTable()
    372  newEntry.offset = (U32)(split - base);  in ZSTD_ldm_generateSequences_internal()
    386  if (cur->checksum != checksum || cur->offset <= lowestIndex) {  in ZSTD_ldm_generateSequences_internal()
    391  cur->offset < dictLimit ? dictBase : base;  in ZSTD_ldm_generateSequences_internal()
    392  BYTE const* const pMatch = curMatchBase + cur->offset;  in ZSTD_ldm_generateSequences_internal()
    394  cur->offset < dictLimit ? dictEnd : iend;  in ZSTD_ldm_generateSequences_internal()
    396  cur->offset < dictLimit ? dictStart : lowPrefixPtr;  in ZSTD_ldm_generateSequences_internal()
    [all …]
D | zstd_lazy.c |
     947  size_t offset=0;  in ZSTD_compressBlock_lazy_generic() (local)
     974  matchLength = ml2, start = ip, offset=offsetFound;  in ZSTD_compressBlock_lazy_generic()
     987  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {  in ZSTD_compressBlock_lazy_generic()
     990  int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);  in ZSTD_compressBlock_lazy_generic()
     992  matchLength = mlRep, offset = 0, start = ip;  in ZSTD_compressBlock_lazy_generic()
    1004  int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);  in ZSTD_compressBlock_lazy_generic()
    1006  matchLength = mlRep, offset = 0, start = ip;  in ZSTD_compressBlock_lazy_generic()
    1012  int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);  in ZSTD_compressBlock_lazy_generic()
    1014  matchLength = ml2, offset = offset2, start = ip;  in ZSTD_compressBlock_lazy_generic()
    1022  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {  in ZSTD_compressBlock_lazy_generic()
    [all …]
D | zstd_double_fast.c |
    127  U32 offset;  in ZSTD_compressBlock_doubleFast_generic() (local)
    168  offset = (U32)(ip-matchLong);  in ZSTD_compressBlock_doubleFast_generic()
    180  offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);  in ZSTD_compressBlock_doubleFast_generic()
    219  offset = (U32)(ip-matchL3);  in ZSTD_compressBlock_doubleFast_generic()
    231  offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);  in ZSTD_compressBlock_doubleFast_generic()
    239  offset = (U32)(curr - matchIndexS);  in ZSTD_compressBlock_doubleFast_generic()
    243  offset = (U32)(ip - match);  in ZSTD_compressBlock_doubleFast_generic()
    249  offset_1 = offset;  in ZSTD_compressBlock_doubleFast_generic()
    251  …ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATC…  in ZSTD_compressBlock_doubleFast_generic()
    420  U32 offset;  in ZSTD_compressBlock_doubleFast_extDict_generic() (local)
    [all …]
D | zstd_compress_sequences.c |
    319  BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);  in ZSTD_encodeSequences_body()
    322  BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,  in ZSTD_encodeSequences_body()
    325  BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);  in ZSTD_encodeSequences_body()
    340  (unsigned)sequences[n].offset);  in ZSTD_encodeSequences_body()
    356  BIT_addBits(&blockStream, sequences[n].offset, extraBits);  in ZSTD_encodeSequences_body()
    359  BIT_addBits(&blockStream, sequences[n].offset >> extraBits,  in ZSTD_encodeSequences_body()
    362  BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */  in ZSTD_encodeSequences_body()
/lib/zstd/decompress/
D | zstd_decompress_block.c |
    660  size_t offset;  (member)
    687  HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {  in ZSTD_overlapCopy8() (argument)
    689  if (offset < 8) {  in ZSTD_overlapCopy8()
    693  int const sub2 = dec64table[offset];  in ZSTD_overlapCopy8()
    698  *ip += dec32table[offset];  in ZSTD_overlapCopy8()
    773  const BYTE* match = oLitEnd - sequence.offset;  in ZSTD_execSequenceEnd()
    788  if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {  in ZSTD_execSequenceEnd()
    790  … RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");  in ZSTD_execSequenceEnd()
    818  const BYTE* match = oLitEnd - sequence.offset;  in ZSTD_execSequence()
    854  if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {  in ZSTD_execSequence()
    [all …]
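ZSTD_overlapCopy8() exists because a match whose offset is smaller than the 8-byte copy width overlaps its own destination; dec32table/dec64table pick a stagger so the wide copies stay safe. The underlying idea, reduced to a plain byte loop for illustration:

    /*
     * Copy len bytes from dst - offset to dst. When offset < len the two
     * regions overlap, so the copy must run front to back: bytes written
     * early are re-read later. With offset 1 this replicates one byte;
     * with offset 2 it repeats a two-byte pattern, e.g.
     *
     *   "ab" then overlap_copy(buf + 2, 2, 6)  ->  "abababab"
     */
    static void overlap_copy(unsigned char *dst, size_t offset, size_t len)
    {
            const unsigned char *src = dst - offset;

            while (len--)
                    *dst++ = *src++;
    }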
/lib/842/
D | 842_decompress.c |
    165  u64 index, offset, total = round_down(p->out - p->ostart, 8);  in __do_index() (local)
    172  offset = index * size;  in __do_index()
    184  if (offset >= pos)  in __do_index()
    187  offset += section;  in __do_index()
    190  if (offset + size > total) {  in __do_index()
    192  (unsigned long)offset, (unsigned long)total);  in __do_index()
    201  (unsigned long)(index * size), (unsigned long)offset,  in __do_index()
    203  (unsigned long)beN_to_cpu(&p->ostart[offset], size));  in __do_index()
    205  memcpy(p->out, &p->ostart[offset], size);  in __do_index()
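__do_index() converts an (index, size) pair into a byte offset into already-produced output and rejects anything beyond total before copying. The decompressor is consumed through <linux/sw842.h>; a hedged sketch, with demo() and the buffers as placeholders:

    #include <linux/sw842.h>

    static int demo(const u8 *comp, unsigned int comp_len,
                    u8 *out, unsigned int out_max)
    {
            unsigned int out_len = out_max;  /* in: capacity, out: actual size */
            int ret = sw842_decompress(comp, comp_len, out, &out_len);

            return ret ? ret : out_len;
    }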
/lib/lz4/
D | lz4_decompress.c |
    124  size_t offset;  in LZ4_decompress_generic() (local)
    165  offset = LZ4_readLE16(ip);  in LZ4_decompress_generic()
    167  match = op - offset;  in LZ4_decompress_generic()
    172  (offset >= 8) &&  in LZ4_decompress_generic()
    289  offset = LZ4_readLE16(ip);  in LZ4_decompress_generic()
    291  match = op - offset;  in LZ4_decompress_generic()
    311  LZ4_write32(op, (U32)offset);  in LZ4_decompress_generic()
    405  if (unlikely(offset < 8)) {  in LZ4_decompress_generic()
    410  match += inc32table[offset];  in LZ4_decompress_generic()
    412  match -= dec64table[offset];  in LZ4_decompress_generic()
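Here offset is the 16-bit little-endian match distance; distances under 8 take the staggered-copy path through inc32table/dec64table, the same overlap problem sketched above for zstd. Caller-side use of the safe decoder, with demo() and sizes as placeholders:

    #include <linux/lz4.h>

    static int demo(const char *comp, int comp_len, char *out, int out_max)
    {
            /*
             * Returns the number of bytes decompressed, or a negative
             * value on malformed input; never writes past out + out_max.
             */
            return LZ4_decompress_safe(comp, out, comp_len, out_max);
    }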