
Searched refs:size (Results 1 – 25 of 147) sorted by relevance

/kernel/dma/
direct.c
70 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) in dma_coherent_ok() argument
76 return dma_addr + size - 1 <= in dma_coherent_ok()
80 static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size) in dma_set_decrypted() argument
84 return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size)); in dma_set_decrypted()
87 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size) in dma_set_encrypted() argument
93 ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size)); in dma_set_encrypted()
100 size_t size) in __dma_direct_free_pages() argument
102 if (swiotlb_free(dev, page, size)) in __dma_direct_free_pages()
104 dma_free_contiguous(dev, page, size); in __dma_direct_free_pages()
107 static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size) in dma_direct_alloc_swiotlb() argument
[all …]
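
The hits above are dma-direct's internal coherent-allocation path. From a driver's point of view they sit behind dma_alloc_coherent(); a minimal caller-side sketch (the example_* helper names are invented):

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment. For devices without an IOMMU this
 * allocation is served by dma-direct, where dma_coherent_ok() checks
 * that dma_addr + size - 1 fits the device's coherent DMA mask. */
static int example_alloc_ring(struct device *dev, size_t size,
			      void **cpu, dma_addr_t *bus)
{
	*cpu = dma_alloc_coherent(dev, size, bus, GFP_KERNEL);
	return *cpu ? 0 : -ENOMEM;
}

static void example_free_ring(struct device *dev, size_t size,
			      void *cpu, dma_addr_t bus)
{
	dma_free_coherent(dev, size, cpu, bus);
}
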
mapping.c
26 size_t size; member
36 dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, in dmam_release()
45 WARN_ON(this->size != match->size || in dmam_match()
61 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, in dmam_free_coherent() argument
64 struct dma_devres match_data = { size, vaddr, dma_handle }; in dmam_free_coherent()
66 dma_free_coherent(dev, size, vaddr, dma_handle); in dmam_free_coherent()
85 void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, in dmam_alloc_attrs() argument
95 vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); in dmam_alloc_attrs()
103 dr->size = size; in dmam_alloc_attrs()
144 size_t offset, size_t size, enum dma_data_direction dir, in dma_map_page_attrs() argument
[all …]
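
mapping.c also implements the device-managed variants whose devres bookkeeping (dmam_release()/dmam_match()) appears above. A minimal probe()-side sketch, with a hypothetical example_probe() and an arbitrary 4 KiB size:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_probe(struct device *dev)
{
	dma_addr_t bus;
	void *buf;

	/* Recorded in devres and freed automatically on driver detach;
	 * dmam_match() compares size, vaddr and dma_handle. */
	buf = dmam_alloc_attrs(dev, SZ_4K, &bus, GFP_KERNEL, 0);
	if (!buf)
		return -ENOMEM;
	return 0;
}
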
coherent.c
17 int size; member
39 dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset) in dma_init_coherent_memory() argument
42 int pages = size >> PAGE_SHIFT; in dma_init_coherent_memory()
45 if (!size) in dma_init_coherent_memory()
48 mem_base = memremap(phys_addr, size, MEMREMAP_WC); in dma_init_coherent_memory()
62 dma_mem->size = pages; in dma_init_coherent_memory()
73 &phys_addr, size / SZ_1M); in dma_init_coherent_memory()
118 dma_addr_t device_addr, size_t size) in dma_declare_coherent_memory() argument
123 mem = dma_init_coherent_memory(phys_addr, device_addr, size, false); in dma_declare_coherent_memory()
143 ssize_t size, dma_addr_t *dma_handle) in __dma_alloc_from_coherent() argument
[all …]
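
dma_init_coherent_memory() converts the byte size to pages (size >> PAGE_SHIFT) and memremaps the region write-combined. A hedged sketch of the one public entry point shown, dma_declare_coherent_memory(), with purely illustrative addresses and helper name:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical platform code: dedicate 1 MiB of device-local memory
 * (addresses are invented) to this device's coherent allocations. */
static int example_declare_sram(struct device *dev)
{
	return dma_declare_coherent_memory(dev, 0x90000000, 0x90000000,
					   SZ_1M);
}
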
direct.h
14 void *cpu_addr, dma_addr_t dma_addr, size_t size,
18 void *cpu_addr, dma_addr_t dma_addr, size_t size,
56 dma_addr_t addr, size_t size, enum dma_data_direction dir) in dma_direct_sync_single_for_device() argument
61 swiotlb_sync_single_for_device(dev, paddr, size, dir); in dma_direct_sync_single_for_device()
64 arch_sync_dma_for_device(paddr, size, dir); in dma_direct_sync_single_for_device()
68 dma_addr_t addr, size_t size, enum dma_data_direction dir) in dma_direct_sync_single_for_cpu() argument
73 arch_sync_dma_for_cpu(paddr, size, dir); in dma_direct_sync_single_for_cpu()
78 swiotlb_sync_single_for_cpu(dev, paddr, size, dir); in dma_direct_sync_single_for_cpu()
81 arch_dma_mark_clean(paddr, size); in dma_direct_sync_single_for_cpu()
85 struct page *page, unsigned long offset, size_t size, in dma_direct_map_page() argument
[all …]
debug.h
13 size_t offset, size_t size,
18 size_t size, int direction);
27 extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
31 extern void debug_dma_free_coherent(struct device *dev, size_t size,
35 size_t size, int direction,
40 size_t size, int direction);
43 dma_addr_t dma_handle, size_t size,
48 size_t size, int direction);
59 size_t offset, size_t size, in debug_dma_map_page() argument
66 size_t size, int direction) in debug_dma_unmap_page() argument
[all …]
ops_helpers.c
19 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_common_get_sgtable() argument
27 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in dma_common_get_sgtable()
36 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_common_mmap() argument
41 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; in dma_common_mmap()
48 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_common_mmap()
63 struct page *dma_common_alloc_pages(struct device *dev, size_t size, in dma_common_alloc_pages() argument
69 page = dma_alloc_contiguous(dev, size, gfp); in dma_common_alloc_pages()
71 page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size)); in dma_common_alloc_pages()
75 *dma_handle = ops->map_page(dev, page, 0, size, dir, in dma_common_alloc_pages()
78 dma_free_contiguous(dev, page, size); in dma_common_alloc_pages()
[all …]
pool.c
46 static void dma_atomic_pool_size_add(gfp_t gfp, size_t size) in dma_atomic_pool_size_add() argument
49 pool_size_dma += size; in dma_atomic_pool_size_add()
51 pool_size_dma32 += size; in dma_atomic_pool_size_add()
53 pool_size_kernel += size; in dma_atomic_pool_size_add()
58 unsigned long size; in cma_in_zone() local
66 size = cma_get_size(cma); in cma_in_zone()
67 if (!size) in cma_in_zone()
71 end = cma_get_base(cma) + size - 1; in cma_in_zone()
240 static struct page *__dma_alloc_from_pool(struct device *dev, size_t size, in __dma_alloc_from_pool() argument
247 addr = gen_pool_alloc(pool, size); in __dma_alloc_from_pool()
[all …]
contiguous.c
207 dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) in dma_contiguous_early_fixup() argument
228 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, in dma_contiguous_reserve_area() argument
234 ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, in dma_contiguous_reserve_area()
283 static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp) in cma_alloc_aligned() argument
285 unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT); in cma_alloc_aligned()
287 return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN); in cma_alloc_aligned()
305 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) in dma_alloc_contiguous() argument
315 return cma_alloc_aligned(dev->cma_area, size, gfp); in dma_alloc_contiguous()
316 if (size <= PAGE_SIZE) in dma_alloc_contiguous()
325 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
[all …]
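
dma_alloc_contiguous() is an internal DMA-API helper (declared in <linux/dma-map-ops.h>), not a general driver interface; cma_alloc_aligned() caps the alignment order at CONFIG_CMA_ALIGNMENT and converts bytes to pages via size >> PAGE_SHIFT. A sketch of a caller, with invented wrapper names:

#include <linux/dma-map-ops.h>

static struct page *example_grab(struct device *dev, size_t size)
{
	/* Falls back from dev->cma_area to the global CMA area; a
	 * request of size <= PAGE_SIZE skips CMA entirely (see above). */
	return dma_alloc_contiguous(dev, size, GFP_KERNEL);
}

static void example_drop(struct device *dev, struct page *page, size_t size)
{
	dma_free_contiguous(dev, page, size);
}
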
debug.c
70 u64 size; member
283 ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) in containing_match()
315 entry->size == ref->size ? ++match_lvl : 0; in __hash_bucket_find()
418 entry->dev_addr, entry->size, in debug_dma_dump_mappings()
794 entry->dev_addr, entry->size, in dump_show()
862 count, entry->dev_addr, entry->size, in dma_debug_device_change()
975 ref->dev_addr, ref->size); in check_unmap()
980 if (ref->size != entry->size) { in check_unmap()
985 ref->dev_addr, entry->size, ref->size); in check_unmap()
993 ref->dev_addr, ref->size, in check_unmap()
[all …]
swiotlb.c
196 void __init swiotlb_adjust_size(unsigned long size) in swiotlb_adjust_size() argument
206 size = ALIGN(size, IO_TLB_SIZE); in swiotlb_adjust_size()
207 default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); in swiotlb_adjust_size()
209 size = default_nslabs << IO_TLB_SHIFT; in swiotlb_adjust_size()
210 pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); in swiotlb_adjust_size()
427 int swiotlb_init_late(size_t size, gfp_t gfp_mask, in swiotlb_init_late() argument
431 unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); in swiotlb_init_late()
551 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, in swiotlb_bounce() argument
578 alloc_size, size, tlb_offset); in swiotlb_bounce()
585 if (size > alloc_size) { in swiotlb_bounce()
[all …]
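
The rounding in swiotlb_adjust_size() is easier to follow with concrete numbers. A standalone userspace sketch, assuming the usual constants (IO_TLB_SHIFT = 11, i.e. 2 KiB slabs, and IO_TLB_SEGSIZE = 128 slabs per segment):

#include <stdio.h>

#define IO_TLB_SHIFT	11
#define IO_TLB_SIZE	(1UL << IO_TLB_SHIFT)
#define IO_TLB_SEGSIZE	128
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long size = 1UL << 20;			/* request: 1 MiB */
	unsigned long nslabs;

	size = ALIGN_UP(size, IO_TLB_SIZE);		/* round to slab size */
	nslabs = ALIGN_UP(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	size = nslabs << IO_TLB_SHIFT;			/* final pool size */
	printf("bounce buffer: %lu slabs, %luMB\n", nslabs, size >> 20);
	return 0;
}
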
/kernel/kcsan/
core.c
115 size_t size, in find_watchpoint() argument
140 if (matching_access(wp_addr_masked, wp_size, addr_masked, size)) in find_watchpoint()
148 insert_watchpoint(unsigned long addr, size_t size, bool is_write) in insert_watchpoint() argument
151 const long encoded_watchpoint = encode_watchpoint(addr, size, is_write); in insert_watchpoint()
210 check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
223 check_access(scoped_access->ptr, scoped_access->size, in kcsan_check_scoped_accesses()
231 is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type) in is_atomic() argument
245 (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) && in is_atomic()
246 !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size)) in is_atomic()
268 should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type) in should_watch() argument
[all …]
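
is_atomic() is why marking matters: with CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC, an aligned plain write no larger than sizeof(long) is not watched, while a racing plain read still is. A sketch of the access pair this logic evaluates (function names invented):

#include <linux/compiler.h>

static int flag;

/* A plain "flag = 1;" would be treated as atomic by is_atomic() above
 * (aligned, <= sizeof(long), not compound); marking both sides keeps
 * KCSAN quiet and documents the intentional concurrency. */
void example_writer(void)
{
	WRITE_ONCE(flag, 1);
}

int example_reader(void)
{
	return READ_ONCE(flag);
}
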
report.c
31 size_t size; member
447 other_info->ai.size, get_thread_desc(other_info->ai.task_pid), in print_report()
459 get_access_type(ai->access_type), ai->ptr, ai->size, in print_report()
463 get_access_type(ai->access_type), ai->ptr, ai->size, in print_report()
472 if (ai->size <= 8) { in print_report()
473 int hex_len = ai->size * 2; in print_report()
504 other_info->ai.size = 0; in release_report()
565 } while (other_info->ai.size && other_info->ai.ptr == ai->ptr && in set_other_info_task_blocking()
591 WARN_ON(other_info->ai.size); in prepare_report_producer()
609 while (!other_info->ai.size) { /* Await valid @other_info. */ in prepare_report_consumer()
[all …]
/kernel/
stacktrace.c
47 int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, in stack_trace_snprint() argument
55 for (i = 0; i < nr_entries && size; i++) { in stack_trace_snprint()
56 generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ', in stack_trace_snprint()
60 if (generated >= size) { in stack_trace_snprint()
61 buf += size; in stack_trace_snprint()
62 size = 0; in stack_trace_snprint()
65 size -= generated; in stack_trace_snprint()
77 unsigned int size; member
86 if (c->len >= c->size) in stack_trace_consume_entry()
94 return c->len < c->size; in stack_trace_consume_entry()
[all …]
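
stack_trace_snprint() decrements its size budget per formatted entry and stops cleanly once the buffer fills. A hypothetical debug helper combining it with stack_trace_save():

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void example_dump_stack(char *buf, size_t buflen)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	stack_trace_snprint(buf, buflen, entries, nr, 1);
}
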
iomem.c
9 __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size) in ioremap_cache() argument
11 return ioremap(offset, size); in ioremap_cache()
16 static void *arch_memremap_wb(resource_size_t offset, unsigned long size) in arch_memremap_wb() argument
18 return (__force void *)ioremap_cache(offset, size); in arch_memremap_wb()
23 static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, in arch_memremap_can_ram_remap() argument
30 static void *try_ram_remap(resource_size_t offset, size_t size, in try_ram_remap() argument
37 arch_memremap_can_ram_remap(offset, size, flags)) in try_ram_remap()
71 void *memremap(resource_size_t offset, size_t size, unsigned long flags) in memremap() argument
73 int is_ram = region_intersects(offset, size, in memremap()
82 &offset, (unsigned long) size); in memremap()
[all …]
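
memremap() first classifies the range with region_intersects() and only then picks between a RAM remap and an ioremap-style mapping. A minimal caller sketch with an invented helper name:

#include <linux/io.h>

static void *example_map_fw_table(phys_addr_t pa, size_t len)
{
	/* MEMREMAP_WB asks for a cacheable mapping; memremap() returns
	 * NULL on failure, not a __iomem pointer. */
	return memremap(pa, len, MEMREMAP_WB);
}
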
regset.c
8 unsigned int size, in __regset_get() argument
16 if (size > regset->n * regset->size) in __regset_get()
17 size = regset->n * regset->size; in __regset_get()
19 to_free = p = kzalloc(size, GFP_KERNEL); in __regset_get()
24 (struct membuf){.p = p, .left = size}); in __regset_get()
30 return size - res; in __regset_get()
35 unsigned int size, in regset_get() argument
38 return __regset_get(target, regset, size, &data); in regset_get()
44 unsigned int size, in regset_get_alloc() argument
48 return __regset_get(target, regset, size, data); in regset_get_alloc()
[all …]
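
__regset_get() clamps the request to regset->n * regset->size, so callers may over-ask safely and use the return value as the byte count actually filled. A hedged sketch of a coredump-style caller (helper name invented):

#include <linux/regset.h>
#include <linux/sched.h>

static int example_fetch_regs(struct task_struct *task,
			      const struct user_regset *regset,
			      void *buf, unsigned int len)
{
	/* Returns bytes filled (size - res above), or a -errno. */
	return regset_get(task, regset, len, buf);
}
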
kcov.c
59 unsigned int size; member
77 unsigned int size; member
134 static struct kcov_remote_area *kcov_remote_area_get(unsigned int size) in kcov_remote_area_get() argument
141 if (area->size == size) { in kcov_remote_area_get()
151 unsigned int size) in kcov_remote_area_put() argument
154 area->size = size; in kcov_remote_area_put()
320 u64 size = cases[1]; in __sanitizer_cov_trace_switch() local
323 switch (size) { in __sanitizer_cov_trace_switch()
346 unsigned int size, void *area, enum kcov_mode mode, in kcov_start() argument
349 kcov_debug("t = %px, size = %u, area = %px\n", t, size, area); in kcov_start()
[all …]
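
kcov counts size in unsigned-long words, not bytes, which is what the kcov_start() debug print reports. A userspace sketch following the documented flow (paths and constants per Documentation/dev-tools/kcov.rst):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kcov.h>

#define COVER_SIZE (64 << 10)	/* in words, as kcov counts size */

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	unsigned long *cover, n, i;

	if (fd == -1 || ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		exit(1);
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED || ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
		exit(1);
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
	read(-1, NULL, 0);			/* the traced syscall */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);
	ioctl(fd, KCOV_DISABLE, 0);
	return 0;
}
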
resource.c
231 resource_size_t size; in __release_child_resources() local
245 size = resource_size(tmp); in __release_child_resources()
247 tmp->end = size - 1; in __release_child_resources()
493 size_t size, unsigned long flags, in __region_intersects() argument
501 res.end = start + size - 1; in __region_intersects()
541 int region_intersects(resource_size_t start, size_t size, unsigned long flags, in region_intersects() argument
547 ret = __region_intersects(&iomem_resource, start, size, flags, desc); in region_intersects()
560 resource_size_t size, in simple_align_resource() argument
581 resource_size_t size, in __find_resource() argument
615 size, constraint->align); in __find_resource()
[all …]
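
region_intersects() returns one of REGION_INTERSECTS, REGION_DISJOINT or REGION_MIXED for the range [start, start + size). A small sketch of the kind of check memremap() performs (wrapper name invented):

#include <linux/ioport.h>
#include <linux/types.h>

static bool example_range_is_ram(resource_size_t start, size_t size)
{
	return region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				 IORES_DESC_NONE) == REGION_INTERSECTS;
}
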
/kernel/events/
ring_buffer.c
139 unsigned long data_size, unsigned int size, in ring_buffer_has_space() argument
143 return CIRC_SPACE(head, tail, data_size) >= size; in ring_buffer_has_space()
145 return CIRC_SPACE(tail, head, data_size) >= size; in ring_buffer_has_space()
151 struct perf_event *event, unsigned int size, in __perf_output_begin() argument
187 size += sizeof(lost_event); in __perf_output_begin()
189 size += event->id_header_size; in __perf_output_begin()
200 size, backward))) in __perf_output_begin()
217 head += size; in __perf_output_begin()
219 head -= size; in __perf_output_begin()
240 handle->size = (1UL << page_shift) - offset; in __perf_output_begin()
[all …]
internal.h
94 unsigned long size, u64 flags);
136 unsigned long size, written; \
139 size = min(handle->size, len); \
141 written = size - written; \
147 handle->size -= written; \
148 if (!handle->size) { \
154 handle->size = PAGE_SIZE << page_order(rb); \
156 } while (len && written == size); \
165 __DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
173 orig_len - len, size) in __output_custom()
/kernel/trace/
trace_events_inject.c
142 int size = 0; in trace_get_entry_size() local
146 if (field->size + field->offset > size) in trace_get_entry_size()
147 size = field->size + field->offset; in trace_get_entry_size()
150 return size; in trace_get_entry_size()
153 static void *trace_alloc_entry(struct trace_event_call *call, int *size) in trace_alloc_entry() argument
177 str_loc -= field->offset + field->size; in trace_alloc_entry()
189 *size = entry_size + 1; in trace_alloc_entry()
220 strlcpy(entry + field->offset, addr, field->size); in parse_entry()
238 str_loc -= field->offset + field->size; in parse_entry()
247 switch (field->size) { in parse_entry()
[all …]
bpf_trace.c
163 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) in bpf_probe_read_user_common() argument
167 ret = copy_from_user_nofault(dst, unsafe_ptr, size); in bpf_probe_read_user_common()
169 memset(dst, 0, size); in bpf_probe_read_user_common()
173 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, in BPF_CALL_3() argument
176 return bpf_probe_read_user_common(dst, size, unsafe_ptr); in BPF_CALL_3()
189 bpf_probe_read_user_str_common(void *dst, u32 size, in bpf_probe_read_user_str_common() argument
204 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size); in bpf_probe_read_user_str_common()
206 memset(dst, 0, size); in bpf_probe_read_user_str_common()
210 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, in BPF_CALL_3() argument
213 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr); in BPF_CALL_3()
[all …]
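
On the BPF side these surface as the bpf_probe_read_user{,_str}() helpers; on fault the destination is zero-filled rather than left stale, as the memset() above shows. A hedged libbpf-style sketch (section and function names are illustrative; assumes do_sys_openat2's signature and an x86 CO-RE build):

#include <linux/types.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(example_trace_open, int dfd, const char *filename)
{
	char name[64] = {};

	/* Zero-fills name[] if the user pointer faults. */
	bpf_probe_read_user_str(name, sizeof(name), filename);
	bpf_printk("openat: %s", name);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
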
/kernel/debug/kdb/
kdb_support.c
266 int kdb_getarea_size(void *res, unsigned long addr, size_t size) in kdb_getarea_size() argument
268 int ret = copy_from_kernel_nofault((char *)res, (char *)addr, size); in kdb_getarea_size()
291 int kdb_putarea_size(unsigned long addr, void *res, size_t size) in kdb_putarea_size() argument
293 int ret = copy_to_kernel_nofault((char *)addr, (char *)res, size); in kdb_putarea_size()
317 static int kdb_getphys(void *res, unsigned long addr, size_t size) in kdb_getphys() argument
328 memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size); in kdb_getphys()
343 int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size) in kdb_getphysword() argument
352 switch (size) { in kdb_getphysword()
369 if (size <= sizeof(*word)) { in kdb_getphysword()
378 kdb_func_printf("bad width %zu\n", size); in kdb_getphysword()
[all …]
/kernel/module/
livepatch.c
20 unsigned int size, symndx; in copy_module_elf() local
23 size = sizeof(*mod->klp_info); in copy_module_elf()
24 mod->klp_info = kmalloc(size, GFP_KERNEL); in copy_module_elf()
29 size = sizeof(mod->klp_info->hdr); in copy_module_elf()
30 memcpy(&mod->klp_info->hdr, info->hdr, size); in copy_module_elf()
33 size = sizeof(*info->sechdrs) * info->hdr->e_shnum; in copy_module_elf()
34 mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL); in copy_module_elf()
41 size = info->sechdrs[info->hdr->e_shstrndx].sh_size; in copy_module_elf()
42 mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL); in copy_module_elf()
kallsyms.c
119 symsect->sh_entsize = module_get_offset(mod, &mod->init_layout.size, symsect, in layout_symtab()
137 info->symoffs = ALIGN(mod->data_layout.size, symsect->sh_addralign ?: 1); in layout_symtab()
138 info->stroffs = mod->data_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); in layout_symtab()
139 mod->data_layout.size += strtab_size; in layout_symtab()
141 info->core_typeoffs = mod->data_layout.size; in layout_symtab()
142 mod->data_layout.size += ndst * sizeof(char); in layout_symtab()
143 mod->data_layout.size = strict_align(mod->data_layout.size); in layout_symtab()
147 strsect->sh_entsize = module_get_offset(mod, &mod->init_layout.size, strsect, in layout_symtab()
152 mod->init_layout.size = ALIGN(mod->init_layout.size, in layout_symtab()
154 info->mod_kallsyms_init_off = mod->init_layout.size; in layout_symtab()
[all …]
/kernel/bpf/
local_storage.c
188 u32 size; in bpf_percpu_cgroup_storage_copy() local
201 size = round_up(_map->value_size, 8); in bpf_percpu_cgroup_storage_copy()
204 per_cpu_ptr(storage->percpu_buf, cpu), size); in bpf_percpu_cgroup_storage_copy()
205 off += size; in bpf_percpu_cgroup_storage_copy()
217 u32 size; in bpf_percpu_cgroup_storage_update() local
235 size = round_up(_map->value_size, 8); in bpf_percpu_cgroup_storage_update()
238 value + off, size); in bpf_percpu_cgroup_storage_update()
239 off += size; in bpf_percpu_cgroup_storage_update()
363 u32 offset, size; in cgroup_storage_check_btf() local
384 size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id); in cgroup_storage_check_btf()
[all …]
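
The round_up(value_size, 8) stride also defines the layout userspace sees when it reads a per-CPU map: one 8-byte-aligned slot per possible CPU. A userspace sketch using libbpf (helper name invented):

#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int example_read_percpu(int map_fd, void *key, size_t value_size)
{
	size_t stride = (value_size + 7) & ~7UL;	/* round_up(size, 8) */
	int ncpu = libbpf_num_possible_cpus();
	char *vals = calloc(ncpu, stride);

	if (!vals)
		return -1;
	if (bpf_map_lookup_elem(map_fd, key, vals)) {
		free(vals);
		return -1;
	}
	/* vals + i * stride holds CPU i's copy of the value. */
	free(vals);
	return 0;
}
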
