/kernel/dma/

mapping.c
    23   size_t size;   [member]
    33   dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,   in dmam_release()
    42   WARN_ON(this->size != match->size ||   in dmam_match()
    58   void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,   in dmam_free_coherent()  [argument]
    61   struct dma_devres match_data = { size, vaddr, dma_handle };   in dmam_free_coherent()
    63   dma_free_coherent(dev, size, vaddr, dma_handle);   in dmam_free_coherent()
    82   void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,   in dmam_alloc_attrs()  [argument]
    92   vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);   in dmam_alloc_attrs()
   100   dr->size = size;   in dmam_alloc_attrs()
   141   size_t offset, size_t size, enum dma_data_direction dir,   in dma_map_page_attrs()  [argument]
   [all …]
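The dmam_* entries above are the device-managed ("devres") wrappers around dma_alloc_attrs()/dma_free_attrs(). A minimal driver-side sketch of how they are typically used follows; the device, probe function, and buffer size are hypothetical.

/*
 * Minimal sketch (hypothetical driver): dmam_alloc_coherent() ties the
 * buffer's lifetime to the device, so no explicit dmam_free_coherent()
 * is needed on the error or remove paths.
 */
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

#define MYDEV_RING_BYTES 4096	/* assumed buffer size */

static int mydev_probe(struct platform_device *pdev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dmam_alloc_coherent(&pdev->dev, MYDEV_RING_BYTES,
				   &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* program ring_dma into the (hypothetical) hardware here */
	return 0;
}

Because the allocation is registered as a devres resource, it is released automatically on unbind, which is what dmam_release()/dmam_match() above implement.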
direct.c
    69   static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)   in dma_coherent_ok()  [argument]
    75   return dma_addr + size - 1 <=   in dma_coherent_ok()
    79   static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,   in __dma_direct_alloc_pages()  [argument]
    86   WARN_ON_ONCE(!PAGE_ALIGNED(size));   in __dma_direct_alloc_pages()
    90   page = dma_alloc_contiguous(dev, size, gfp);   in __dma_direct_alloc_pages()
    91   if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {   in __dma_direct_alloc_pages()
    92   dma_free_contiguous(dev, page, size);   in __dma_direct_alloc_pages()
    97   page = alloc_pages_node(node, gfp, get_order(size));   in __dma_direct_alloc_pages()
    98   if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {   in __dma_direct_alloc_pages()
    99   dma_free_contiguous(dev, page, size);   in __dma_direct_alloc_pages()
   [all …]
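dma_coherent_ok() rejects pages the device cannot address, based on the DMA masks the driver declared beforehand. A sketch of that driver side, assuming a device limited to 32-bit addressing; the function name is illustrative.

/*
 * Sketch: declare how many address bits the device can drive before
 * allocating; dma-direct then retries or rejects pages that fail
 * dma_coherent_ok() for this mask. The 32-bit limit is an assumption.
 */
#include <linux/dma-mapping.h>

static int mydev_setup_dma(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_warn(dev, "no usable DMA addressing\n");
	return ret;
}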
coherent.c
    17   int size;   [member]
    41   dma_addr_t device_addr, size_t size,   in dma_init_coherent_memory()  [argument]
    46   int pages = size >> PAGE_SHIFT;   in dma_init_coherent_memory()
    50   if (!size) {   in dma_init_coherent_memory()
    55   mem_base = memremap(phys_addr, size, MEMREMAP_WC);   in dma_init_coherent_memory()
    74   dma_mem->size = pages;   in dma_init_coherent_memory()
   128   dma_addr_t device_addr, size_t size)   in dma_declare_coherent_memory()  [argument]
   133   ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);   in dma_declare_coherent_memory()
   153   ssize_t size, dma_addr_t *dma_handle)   in __dma_alloc_from_coherent()  [argument]
   155   int order = get_order(size);   in __dma_alloc_from_coherent()
   [all …]
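dma_declare_coherent_memory() (line 128) is how a platform or driver hands a dedicated memory window to the per-device coherent allocator. A sketch with made-up addresses; in recent kernels the prototype lives in <linux/dma-map-ops.h>, older trees declare it in <linux/dma-mapping.h>.

/*
 * Sketch (all addresses are made up): carve out a device-local SRAM
 * window so dma_alloc_coherent() for this device is satisfied from it.
 */
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>

#define MYDEV_SRAM_PHYS		0x40000000UL	/* hypothetical CPU physical address */
#define MYDEV_SRAM_BUS		0x00000000UL	/* hypothetical device-side address */
#define MYDEV_SRAM_BYTES	SZ_1M

static int mydev_declare_sram(struct device *dev)
{
	return dma_declare_coherent_memory(dev, MYDEV_SRAM_PHYS,
					   MYDEV_SRAM_BUS, MYDEV_SRAM_BYTES);
}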
direct.h
    13   void *cpu_addr, dma_addr_t dma_addr, size_t size,
    17   void *cpu_addr, dma_addr_t dma_addr, size_t size,
    55   dma_addr_t addr, size_t size, enum dma_data_direction dir)   in dma_direct_sync_single_for_device()  [argument]
    60   swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);   in dma_direct_sync_single_for_device()
    63   arch_sync_dma_for_device(paddr, size, dir);   in dma_direct_sync_single_for_device()
    67   dma_addr_t addr, size_t size, enum dma_data_direction dir)   in dma_direct_sync_single_for_cpu()  [argument]
    72   arch_sync_dma_for_cpu(paddr, size, dir);   in dma_direct_sync_single_for_cpu()
    77   swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);   in dma_direct_sync_single_for_cpu()
    80   arch_dma_mark_clean(paddr, size);   in dma_direct_sync_single_for_cpu()
    84   struct page *page, unsigned long offset, size_t size,   in dma_direct_map_page()  [argument]
   [all …]
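These helpers back dma_sync_single_for_cpu()/for_device() on the dma-direct path (architecture cache maintenance plus swiotlb bounce copies). A driver-side sketch of the streaming-DMA pattern they support; the device and receive buffer are hypothetical.

/*
 * Sketch of the streaming-DMA pattern: map a buffer, let the device
 * write it, sync it back to the CPU before reading.
 */
#include <linux/dma-mapping.h>

static int mydev_rx_one(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand addr to the device, wait for the transfer ... */

	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* CPU may now read buf; resubmit with dma_sync_single_for_device() */
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	return 0;
}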
debug.h
    13   size_t offset, size_t size,
    17   size_t size, int direction);
    25   extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
    28   extern void debug_dma_free_coherent(struct device *dev, size_t size,
    32   size_t size, int direction,
    36   size_t size, int direction);
    39   dma_addr_t dma_handle, size_t size,
    44   size_t size, int direction);
    55   size_t offset, size_t size,   in debug_dma_map_page()  [argument]
    61   size_t size, int direction)   in debug_dma_unmap_page()  [argument]
   [all …]
ops_helpers.c
    19   void *cpu_addr, dma_addr_t dma_addr, size_t size,   in dma_common_get_sgtable()  [argument]
    27   sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);   in dma_common_get_sgtable()
    35   void *cpu_addr, dma_addr_t dma_addr, size_t size,   in dma_common_mmap()  [argument]
    40   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;   in dma_common_mmap()
    47   if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))   in dma_common_mmap()
    61   struct page *dma_common_alloc_pages(struct device *dev, size_t size,   in dma_common_alloc_pages()  [argument]
    67   page = dma_alloc_contiguous(dev, size, gfp);   in dma_common_alloc_pages()
    69   page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));   in dma_common_alloc_pages()
    73   *dma_handle = ops->map_page(dev, page, 0, size, dir,   in dma_common_alloc_pages()
    76   dma_free_contiguous(dev, page, size);   in dma_common_alloc_pages()
   [all …]
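dma_common_mmap() and dma_common_get_sgtable() are what dma_mmap_coherent()/dma_get_sgtable() fall back to for many configurations. A sketch of exporting a coherent buffer to user space; struct mydev and its fields are assumptions made for the example.

/*
 * Sketch: map an already-allocated coherent buffer into a user VMA
 * from a (hypothetical) driver's mmap handler.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct mydev {
	struct device	*dev;
	void		*cpu_addr;	/* from dma_alloc_coherent() */
	dma_addr_t	dma_addr;
	size_t		bytes;
};

static int mydev_mmap(struct mydev *md, struct vm_area_struct *vma)
{
	return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
				 md->dma_addr, md->bytes);
}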
contiguous.c
   208   dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)   in dma_contiguous_early_fixup()  [argument]
   229   int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,   in dma_contiguous_reserve_area()  [argument]
   235   ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,   in dma_contiguous_reserve_area()
   285   static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)   in cma_alloc_aligned()  [argument]
   287   unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);   in cma_alloc_aligned()
   289   return cma_alloc(cma, size >> PAGE_SHIFT, align,   in cma_alloc_aligned()
   308   struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)   in dma_alloc_contiguous()  [argument]
   319   return cma_alloc_aligned(dev->cma_area, size, gfp);   in dma_alloc_contiguous()
   321   if (size <= PAGE_SIZE) {   in dma_alloc_contiguous()
   322   trace_android_vh_subpage_dma_contig_alloc(&allow_subpage_alloc, dev, &size);   in dma_alloc_contiguous()
   [all …]
pool.c
    49   static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)   in dma_atomic_pool_size_add()  [argument]
    52   pool_size_dma += size;   in dma_atomic_pool_size_add()
    54   pool_size_dma32 += size;   in dma_atomic_pool_size_add()
    56   pool_size_kernel += size;   in dma_atomic_pool_size_add()
    61   unsigned long size;   in cma_in_zone()  [local]
    69   size = cma_get_size(cma);   in cma_in_zone()
    70   if (!size)   in cma_in_zone()
    74   end = cma_get_base(cma) + size - 1;   in cma_in_zone()
   243   static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,   in __dma_alloc_from_pool()  [argument]
   250   addr = gen_pool_alloc(pool, size);   in __dma_alloc_from_pool()
   [all …]
debug.c
    70   u64 size;   [member]
   283   ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))   in containing_match()
   315   entry->size == ref->size ? ++match_lvl : 0;   in __hash_bucket_find()
   420   entry->dev_addr, entry->size,   in debug_dma_dump_mappings()
   797   entry->dev_addr, entry->size,   in dump_show()
   865   count, entry->dev_addr, entry->size,   in dma_debug_device_change()
   978   ref->dev_addr, ref->size);   in check_unmap()
   983   if (ref->size != entry->size) {   in check_unmap()
   988   ref->dev_addr, entry->size, ref->size);   in check_unmap()
   996   ref->dev_addr, ref->size,   in check_unmap()
   [all …]
swiotlb.c
   156   unsigned long size;   in swiotlb_size_or_default()  [local]
   158   size = io_tlb_nslabs << IO_TLB_SHIFT;   in swiotlb_size_or_default()
   160   return size ? size : (IO_TLB_DEFAULT_SIZE);   in swiotlb_size_or_default()
   419   size_t size, enum dma_data_direction dir)   in swiotlb_bounce()  [argument]
   431   while (size) {   in swiotlb_bounce()
   432   sz = min_t(size_t, PAGE_SIZE - offset, size);   in swiotlb_bounce()
   443   size -= sz;   in swiotlb_bounce()
   449   memcpy(vaddr, phys_to_virt(orig_addr), size);   in swiotlb_bounce()
   451   memcpy(phys_to_virt(orig_addr), vaddr, size);   in swiotlb_bounce()
   669   size_t size, enum dma_data_direction dir,   in swiotlb_tbl_sync_single()  [argument]
   [all …]
/kernel/

stacktrace.c
    46   int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,   in stack_trace_snprint()  [argument]
    54   for (i = 0; i < nr_entries && size; i++) {   in stack_trace_snprint()
    55   generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',   in stack_trace_snprint()
    59   if (generated >= size) {   in stack_trace_snprint()
    60   buf += size;   in stack_trace_snprint()
    61   size = 0;   in stack_trace_snprint()
    64   size -= generated;   in stack_trace_snprint()
    76   unsigned int size;   [member]
    85   if (c->len >= c->size)   in stack_trace_consume_entry()
    93   return c->len < c->size;   in stack_trace_consume_entry()
   [all …]
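stack_trace_snprint() (line 46) formats entries that were previously captured with stack_trace_save(). A small in-kernel sketch; the caller, array length, and buffer size are arbitrary choices for the example.

/*
 * Sketch: capture the current call chain and print it using
 * stack_trace_snprint(), which the listing above implements.
 */
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/stacktrace.h>

static void mydev_dump_caller(void)
{
	unsigned long entries[16];
	char buf[512];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
	stack_trace_snprint(buf, sizeof(buf), entries, nr, 0 /* spaces */);
	pr_info("called from:\n%s", buf);
}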
iomem.c
     9   __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)   in ioremap_cache()  [argument]
    11   return ioremap(offset, size);   in ioremap_cache()
    16   static void *arch_memremap_wb(resource_size_t offset, unsigned long size)   in arch_memremap_wb()  [argument]
    18   return (__force void *)ioremap_cache(offset, size);   in arch_memremap_wb()
    23   static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,   in arch_memremap_can_ram_remap()  [argument]
    30   static void *try_ram_remap(resource_size_t offset, size_t size,   in try_ram_remap()  [argument]
    37   arch_memremap_can_ram_remap(offset, size, flags))   in try_ram_remap()
    71   void *memremap(resource_size_t offset, size_t size, unsigned long flags)   in memremap()  [argument]
    73   int is_ram = region_intersects(offset, size,   in memremap()
    82   &offset, (unsigned long) size);   in memremap()
   [all …]
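memremap() (line 71) is the non-__iomem counterpart of ioremap() for ranges that should behave like ordinary memory. A sketch, assuming a firmware-described physical range; the helper name is hypothetical.

/*
 * Sketch: map a RAM-like physical range write-back cached and use it
 * as normal memory; memunmap() releases it.
 */
#include <linux/io.h>

static void *mydev_map_shared_buf(phys_addr_t phys, size_t bytes)
{
	void *va = memremap(phys, bytes, MEMREMAP_WB);

	if (!va)
		return NULL;
	/* ... use va as ordinary memory; call memunmap(va) when done ... */
	return va;
}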
regset.c
     8   unsigned int size,   in __regset_get()  [argument]
    16   if (size > regset->n * regset->size)   in __regset_get()
    17   size = regset->n * regset->size;   in __regset_get()
    19   to_free = p = kzalloc(size, GFP_KERNEL);   in __regset_get()
    24   (struct membuf){.p = p, .left = size});   in __regset_get()
    30   return size - res;   in __regset_get()
    35   unsigned int size,   in regset_get()  [argument]
    38   return __regset_get(target, regset, size, &data);   in regset_get()
    44   unsigned int size,   in regset_get_alloc()  [argument]
    48   return __regset_get(target, regset, size, data);   in regset_get_alloc()
   [all …]
kcov.c
    58   unsigned int size;   [member]
    76   unsigned int size;   [member]
   130   static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)   in kcov_remote_area_get()  [argument]
   137   if (area->size == size) {   in kcov_remote_area_get()
   147   unsigned int size)   in kcov_remote_area_put()  [argument]
   150   area->size = size;   in kcov_remote_area_put()
   300   u64 size = cases[1];   in __sanitizer_cov_trace_switch()  [local]
   303   switch (size) {   in __sanitizer_cov_trace_switch()
   326   unsigned int size, void *area, enum kcov_mode mode,   in kcov_start()  [argument]
   329   kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);   in kcov_start()
   [all …]
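The size handled by kcov_start() is the per-task coverage buffer that user space sizes and maps. A userspace sketch following the flow in Documentation/dev-tools/kcov.rst; the debugfs path is the documented default and COVER_SIZE is an arbitrary choice.

/* Userspace sketch: collect the coverage of one syscall on this thread. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kcov.h>

#define COVER_SIZE (64 << 10)		/* trace buffer size, in words */

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	unsigned long *cover, n, i;

	if (fd < 0 || ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		exit(1);
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED || ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
		exit(1);

	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
	read(-1, NULL, 0);			/* the traced syscall */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);

	ioctl(fd, KCOV_DISABLE, 0);
	return 0;
}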
module.c
   124   return (unsigned long)layout->size;   in __mod_tree_size()
   186   if (mod->init_layout.size)   in mod_tree_insert()
   192   if (mod->init_layout.size)   in mod_tree_remove_init()
   240   static void __mod_update_bounds(void *base, unsigned int size)   in __mod_update_bounds()  [argument]
   243   unsigned long max = min + size;   in __mod_update_bounds()
   253   __mod_update_bounds(mod->core_layout.base, mod->core_layout.size);   in mod_update_bounds()
   254   if (mod->init_layout.size)   in mod_update_bounds()
   255   __mod_update_bounds(mod->init_layout.base, mod->init_layout.size);   in mod_update_bounds()
   694   const void *from, unsigned long size)   in percpu_modcopy()  [argument]
   699   memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);   in percpu_modcopy()
   [all …]
/kernel/kcsan/

core.c
   108   size_t size,   in find_watchpoint()  [argument]
   133   if (matching_access(wp_addr_masked, wp_size, addr_masked, size))   in find_watchpoint()
   141   insert_watchpoint(unsigned long addr, size_t size, bool is_write)   in insert_watchpoint()  [argument]
   144   const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);   in insert_watchpoint()
   211   __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);   in kcsan_check_scoped_accesses()
   217   is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)   in is_atomic()  [argument]
   231   (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&   in is_atomic()
   232   !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))   in is_atomic()
   254   should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)   in should_watch()  [argument]
   263   if (is_atomic(ptr, size, type, ctx))   in should_watch()
   [all …]
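Beyond the implicit watchpoint checks listed above, KCSAN offers assertion macros that code can place on its own data to have racing writers reported. A sketch where the structure and its locking rule are assumptions for the example.

/*
 * Sketch: assert the locking rule "writers of value hold lock"; KCSAN
 * reports any concurrent writer while the assertion is live.
 */
#include <linux/kcsan-checks.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct counter {
	spinlock_t lock;
	u64 value;			/* writers must hold @lock */
};

static void counter_add(struct counter *c, u64 n)
{
	spin_lock(&c->lock);
	ASSERT_EXCLUSIVE_WRITER(c->value);	/* flag racing writers */
	c->value += n;
	spin_unlock(&c->lock);
}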
report.c
    25   size_t size;   [member]
   390   other_info->ai.size, get_thread_desc(other_info->ai.task_pid),   in print_report()
   403   get_access_type(ai->access_type), ai->ptr, ai->size,   in print_report()
   409   get_access_type(ai->access_type), ai->ptr, ai->size,   in print_report()
   439   other_info->ai.size = 0;   in release_report()
   501   } while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&   in set_other_info_task_blocking()
   527   WARN_ON(other_info->ai.size);   in prepare_report_producer()
   545   while (!other_info->ai.size) { /* Await valid @other_info. */   in prepare_report_consumer()
   552   …_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size,   in prepare_report_consumer()
   553   (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))   in prepare_report_consumer()
   [all …]
/kernel/debug/kdb/

kdb_support.c
   326   int kdb_getarea_size(void *res, unsigned long addr, size_t size)   in kdb_getarea_size()  [argument]
   328   int ret = copy_from_kernel_nofault((char *)res, (char *)addr, size);   in kdb_getarea_size()
   351   int kdb_putarea_size(unsigned long addr, void *res, size_t size)   in kdb_putarea_size()  [argument]
   353   int ret = copy_to_kernel_nofault((char *)addr, (char *)res, size);   in kdb_putarea_size()
   377   static int kdb_getphys(void *res, unsigned long addr, size_t size)   in kdb_getphys()  [argument]
   388   memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);   in kdb_getphys()
   403   int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)   in kdb_getphysword()  [argument]
   412   switch (size) {   in kdb_getphysword()
   429   if (size <= sizeof(*word)) {   in kdb_getphysword()
   438   kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);   in kdb_getphysword()
   [all …]
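kdb_getarea_size()/kdb_putarea_size() are thin wrappers around copy_{from,to}_kernel_nofault(), which access kernel memory without risking a crash on a bad address. A sketch of the same primitive used outside kdb; mydbg_peek() is a hypothetical helper.

/*
 * Sketch: fault-tolerant read of a possibly-unmapped kernel address.
 */
#include <linux/errno.h>
#include <linux/uaccess.h>

static int mydbg_peek(unsigned long addr, unsigned long *out)
{
	unsigned long word;

	if (copy_from_kernel_nofault(&word, (void *)addr, sizeof(word)))
		return -EFAULT;		/* address not mapped or not readable */
	*out = word;
	return 0;
}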
/kernel/events/

ring_buffer.c
   139   unsigned long data_size, unsigned int size,   in ring_buffer_has_space()  [argument]
   143   return CIRC_SPACE(head, tail, data_size) >= size;   in ring_buffer_has_space()
   145   return CIRC_SPACE(tail, head, data_size) >= size;   in ring_buffer_has_space()
   151   struct perf_event *event, unsigned int size,   in __perf_output_begin()  [argument]
   185   size += sizeof(lost_event);   in __perf_output_begin()
   187   size += event->id_header_size;   in __perf_output_begin()
   198   size, backward)))   in __perf_output_begin()
   215   head += size;   in __perf_output_begin()
   217   head -= size;   in __perf_output_begin()
   238   handle->size = (1UL << page_shift) - offset;   in __perf_output_begin()
   [all …]
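__perf_output_begin() reserves record space between data_tail and data_head of the mmap'ed perf ring; the user-space consumer is the other half of that protocol. A simplified reader sketch, assuming the event was opened with perf_event_open(), the ring was mmap'ed, data points at the data area, data_size is a power of two, and records that wrap the ring edge are not handled.

/* Userspace sketch of a perf ring-buffer consumer. */
#include <linux/perf_event.h>
#include <stdint.h>

static void drain_ring(struct perf_event_mmap_page *meta, char *data,
		       uint64_t data_size, void (*handle)(void *rec))
{
	uint64_t head = __atomic_load_n(&meta->data_head, __ATOMIC_ACQUIRE);
	uint64_t tail = meta->data_tail;

	while (tail < head) {
		struct perf_event_header *hdr =
			(void *)(data + (tail & (data_size - 1)));

		handle(hdr);		/* real code must copy wrapped records */
		tail += hdr->size;
	}
	/* tell the kernel the space is free again */
	__atomic_store_n(&meta->data_tail, tail, __ATOMIC_RELEASE);
}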
internal.h
    94   unsigned long size, u64 flags);
   136   unsigned long size, written; \
   139   size = min(handle->size, len); \
   141   written = size - written; \
   147   handle->size -= written; \
   148   if (!handle->size) { \
   154   handle->size = PAGE_SIZE << page_order(rb); \
   156   } while (len && written == size); \
   165   __DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
   173   orig_len - len, size)   in __output_custom()
/kernel/trace/

bpf_trace.c
   150   bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)   in bpf_probe_read_user_common()  [argument]
   154   ret = copy_from_user_nofault(dst, unsafe_ptr, size);   in bpf_probe_read_user_common()
   156   memset(dst, 0, size);   in bpf_probe_read_user_common()
   160   BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,   in BPF_CALL_3()  [argument]
   163   return bpf_probe_read_user_common(dst, size, unsafe_ptr);   in BPF_CALL_3()
   176   bpf_probe_read_user_str_common(void *dst, u32 size,   in bpf_probe_read_user_str_common()  [argument]
   191   ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);   in bpf_probe_read_user_str_common()
   193   memset(dst, 0, size);   in bpf_probe_read_user_str_common()
   197   BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,   in BPF_CALL_3()  [argument]
   200   return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);   in BPF_CALL_3()
   [all …]
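These BPF_CALL_3 stubs implement the bpf_probe_read_user()/bpf_probe_read_user_str() helpers that tracing programs call. A BPF-side sketch in libbpf style; the probed symbol and its argument layout are assumptions made for the example.

/* BPF program sketch: copy a user-space string from a kprobe context. */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(trace_openat2, int dfd, const char *filename)
{
	char path[64];
	long n;

	n = bpf_probe_read_user_str(path, sizeof(path), filename);
	if (n > 0)
		bpf_printk("openat: %s", path);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";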
trace_events_inject.c
   142   int size = 0;   in trace_get_entry_size()  [local]
   146   if (field->size + field->offset > size)   in trace_get_entry_size()
   147   size = field->size + field->offset;   in trace_get_entry_size()
   150   return size;   in trace_get_entry_size()
   153   static void *trace_alloc_entry(struct trace_event_call *call, int *size)   in trace_alloc_entry()  [argument]
   185   *size = entry_size + 1;   in trace_alloc_entry()
   218   strlcpy(entry + field->offset, addr, field->size);   in parse_entry()
   242   switch (field->size) {   in parse_entry()
   284   int err = -ENODEV, size;   in event_inject_write()  [local]
   300   size = parse_entry(buf, call, &entry);   in event_inject_write()
   [all …]
trace_events_synth.c
   116   unsigned int i, size, n_u64;   in synth_event_define_fields()  [local]
   122   size = event->fields[i]->size;   in synth_event_define_fields()
   126   ret = trace_define_field(call, type, name, offset, size,   in synth_event_define_fields()
   169   int size, err;   in synth_field_string_size()  [local]
   190   err = kstrtouint(buf, 0, &size);   in synth_field_string_size()
   194   if (size > STR_VAR_LEN_MAX)   in synth_field_string_size()
   197   return size;   in synth_field_string_size()
   202   int size = 0;   in synth_field_size()  [local]
   205   size = sizeof(s64);   in synth_field_size()
   207   size = sizeof(u64);   in synth_field_size()
   [all …]
/kernel/bpf/

local_storage.c
   190   u32 size;   in bpf_percpu_cgroup_storage_copy()  [local]
   203   size = round_up(_map->value_size, 8);   in bpf_percpu_cgroup_storage_copy()
   206   per_cpu_ptr(storage->percpu_buf, cpu), size);   in bpf_percpu_cgroup_storage_copy()
   207   off += size;   in bpf_percpu_cgroup_storage_copy()
   219   u32 size;   in bpf_percpu_cgroup_storage_update()  [local]
   237   size = round_up(_map->value_size, 8);   in bpf_percpu_cgroup_storage_update()
   240   value + off, size);   in bpf_percpu_cgroup_storage_update()
   241   off += size;   in bpf_percpu_cgroup_storage_update()
   368   u32 offset, size;   in cgroup_storage_check_btf()  [local]
   389   size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);   in cgroup_storage_check_btf()
   [all …]
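local_storage.c implements BPF_MAP_TYPE_CGROUP_STORAGE, which cgroup-attached programs reach through the bpf_get_local_storage() helper. A BPF-side sketch; the map name, section name, and counting use case are illustrative.

/* BPF program sketch: per-(cgroup, attach type) egress packet counter. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} pkt_count SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	__u64 *counter = bpf_get_local_storage(&pkt_count, 0);

	__sync_fetch_and_add(counter, 1);
	return 1;			/* allow the packet */
}

char LICENSE[] SEC("license") = "GPL";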
queue_stack_maps.c
    20   u32 size; /* max_entries + 1 */   [member]
    39   if (unlikely(head >= qs->size))   in queue_stack_map_is_full()
    72   u64 size, queue_size, cost;   in queue_stack_map_alloc()  [local]
    74   size = (u64) attr->max_entries + 1;   in queue_stack_map_alloc()
    75   cost = queue_size = sizeof(*qs) + size * attr->value_size;   in queue_stack_map_alloc()
    92   qs->size = size;   in queue_stack_map_alloc()
   131   if (unlikely(++qs->tail >= qs->size))   in __queue_map_get()
   163   if (unlikely(index >= qs->size))   in __stack_map_get()
   164   index = qs->size - 1;   in __stack_map_get()
   232   if (unlikely(++qs->tail >= qs->size))   in queue_stack_map_push_elem()
   [all …]
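BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK, implemented here, take no key: push maps to update, pop to lookup-and-delete. A userspace sketch using the libbpf wrappers contemporary with this kernel (newer libbpf renames bpf_create_map() to bpf_map_create()); error handling is trimmed.

/* Userspace sketch: push one value into a queue map and pop it back. */
#include <bpf/bpf.h>
#include <stdio.h>

int demo_queue(void)
{
	__u32 val = 42, out = 0;
	int fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 0 /* key_size */,
				sizeof(__u32), 16 /* max_entries */, 0);

	if (fd < 0)
		return fd;
	bpf_map_update_elem(fd, NULL, &val, BPF_ANY);		/* push */
	bpf_map_lookup_and_delete_elem(fd, NULL, &out);		/* pop  */
	printf("popped %u\n", out);
	return 0;
}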