| /kernel/dma/ |
| D | mapping.c |
     34  size_t size;   (member)
     44  dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,   in dmam_release()
     53  WARN_ON(this->size != match->size ||   in dmam_match()
     69  void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,   in dmam_free_coherent()  (argument)
     72  struct dma_devres match_data = { size, vaddr, dma_handle };   in dmam_free_coherent()
     75  dma_free_coherent(dev, size, vaddr, dma_handle);   in dmam_free_coherent()
     93  void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,   in dmam_alloc_attrs()  (argument)
    103  vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);   in dmam_alloc_attrs()
    111  dr->size = size;   in dmam_alloc_attrs()
    156  size_t offset, size_t size, enum dma_data_direction dir,   in dma_map_page_attrs()  (argument)
    [all …]
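The dmam_*() helpers above are the device-managed (devres) wrappers around the coherent DMA API. A minimal sketch of how a hypothetical driver might use them; the driver function and buffer size are placeholders:

```c
/*
 * Sketch (hypothetical driver): device-managed coherent DMA memory.
 * dmam_alloc_coherent() wraps dmam_alloc_attrs(); devres frees the buffer
 * automatically when the driver detaches, so the error and remove paths
 * need no explicit dmam_free_coherent().
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_probe(struct device *dev)	/* hypothetical */
{
	dma_addr_t dma_handle;
	void *vaddr;

	vaddr = dmam_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* program dma_handle into the device; use vaddr from the CPU */
	return 0;
}
```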
|
| D | direct.c |
     71  bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)   in dma_coherent_ok()  (argument)
     77  return dma_addr + size - 1 <=   in dma_coherent_ok()
     81  static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)   in dma_set_decrypted()  (argument)
     85  return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));   in dma_set_decrypted()
     88  static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)   in dma_set_encrypted()  (argument)
     94  ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));   in dma_set_encrypted()
    101  size_t size)   in __dma_direct_free_pages()  (argument)
    103  if (swiotlb_free(dev, page, size))   in __dma_direct_free_pages()
    105  dma_free_contiguous(dev, page, size);   in __dma_direct_free_pages()
    108  static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)   in dma_direct_alloc_swiotlb()  (argument)
    [all …]
|
| D | coherent.c |
     17  int size;   (member)
     39  dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)   in dma_init_coherent_memory()  (argument)
     42  int pages = size >> PAGE_SHIFT;   in dma_init_coherent_memory()
     45  if (!size)   in dma_init_coherent_memory()
     48  mem_base = memremap(phys_addr, size, MEMREMAP_WC);   in dma_init_coherent_memory()
     62  dma_mem->size = pages;   in dma_init_coherent_memory()
     73  &phys_addr, size / SZ_1M);   in dma_init_coherent_memory()
    118  dma_addr_t device_addr, size_t size)   in dma_declare_coherent_memory()  (argument)
    123  mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);   in dma_declare_coherent_memory()
    143  ssize_t size, dma_addr_t *dma_handle)   in __dma_alloc_from_coherent()  (argument)
    [all …]
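dma_declare_coherent_memory() is the entry point this file exports for claiming a dedicated region to back coherent allocations. A sketch, assuming the current four-argument form declared in <linux/dma-map-ops.h>; the addresses and size are platform-specific placeholders:

```c
/*
 * Sketch (assumed platform addresses): claim a 1 MiB device-local window so
 * that dma_alloc_coherent() for this device is satisfied from it instead of
 * from system RAM. The region is remapped write-combining internally
 * (memremap(..., MEMREMAP_WC), as seen at line 48 above).
 */
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>

static int example_setup(struct device *dev)	/* hypothetical */
{
	/* CPU physical 0x90000000, same bus address, 1 MiB (placeholders) */
	return dma_declare_coherent_memory(dev, 0x90000000, 0x90000000, SZ_1M);
}
```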
|
| D | debug.h |
     13  size_t offset, size_t size,
     18  size_t size, int direction);
     27  extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
     31  extern void debug_dma_free_coherent(struct device *dev, size_t size,
     35  size_t size, int direction,
     40  size_t size, int direction);
     43  dma_addr_t dma_handle, size_t size,
     48  size_t size, int direction);
     58  size_t size, int direction,
     62  size_t size, int direction,
    [all …]
|
| D | direct.h |
     14  void *cpu_addr, dma_addr_t dma_addr, size_t size,
     18  void *cpu_addr, dma_addr_t dma_addr, size_t size,
     57  dma_addr_t addr, size_t size, enum dma_data_direction dir)   in dma_direct_sync_single_for_device()  (argument)
     61  swiotlb_sync_single_for_device(dev, paddr, size, dir);   in dma_direct_sync_single_for_device()
     64  arch_sync_dma_for_device(paddr, size, dir);   in dma_direct_sync_single_for_device()
     68  dma_addr_t addr, size_t size, enum dma_data_direction dir)   in dma_direct_sync_single_for_cpu()  (argument)
     73  arch_sync_dma_for_cpu(paddr, size, dir);   in dma_direct_sync_single_for_cpu()
     77  swiotlb_sync_single_for_cpu(dev, paddr, size, dir);   in dma_direct_sync_single_for_cpu()
     80  arch_dma_mark_clean(paddr, size);   in dma_direct_sync_single_for_cpu()
     84  struct page *page, unsigned long offset, size_t size,   in dma_direct_map_page()  (argument)
    [all …]
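These dma_direct_sync_single_* helpers are the dma-direct backend of the generic streaming-DMA sync calls. A hedged sketch of the driver-facing side, with a hypothetical RX-completion handler:

```c
/*
 * Sketch (hypothetical driver): ownership hand-off for a streaming mapping.
 * The device DMAs into the buffer; the CPU must sync for itself before
 * reading, then sync back for the device before the next transfer.
 */
#include <linux/dma-mapping.h>

static void example_rx_complete(struct device *dev, void *buf,
				dma_addr_t addr, size_t len)
{
	/* device finished writing: make the data visible to the CPU */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

	/* ... consume buf ... */

	/* give the buffer back to the device for the next transfer */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}
```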
|
| D | ops_helpers.c |
     20  void *cpu_addr, dma_addr_t dma_addr, size_t size,   in dma_common_get_sgtable()  (argument)
     28  sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);   in dma_common_get_sgtable()
     36  void *cpu_addr, dma_addr_t dma_addr, size_t size,   in dma_common_mmap()  (argument)
     41  unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;   in dma_common_mmap()
     48  if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))   in dma_common_mmap()
     62  struct page *dma_common_alloc_pages(struct device *dev, size_t size,   in dma_common_alloc_pages()  (argument)
     68  page = dma_alloc_contiguous(dev, size, gfp);   in dma_common_alloc_pages()
     70  page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));   in dma_common_alloc_pages()
     75  *dma_handle = iommu_dma_map_page(dev, page, 0, size, dir,   in dma_common_alloc_pages()
     78  *dma_handle = ops->map_page(dev, page, 0, size, dir,   in dma_common_alloc_pages()
    [all …]
|
| D | debug.c |
     72  u64 size;   (member)
    285  ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))   in containing_match()
    317  entry->size == ref->size ? ++match_lvl : 0;   in __hash_bucket_find()
    536  entry->size, &cln,   in debug_dma_dump_mappings()
    569  entry->size, &cln,   in dump_show()
    865  count, entry->dev_addr, entry->size,   in dma_debug_device_change()
    978  ref->dev_addr, ref->size);   in check_unmap()
    983  if (ref->size != entry->size) {   in check_unmap()
    988  ref->dev_addr, entry->size, ref->size);   in check_unmap()
    996  ref->dev_addr, ref->size,   in check_unmap()
    [all …]
|
| D | pool.c |
     46  static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)   in dma_atomic_pool_size_add()  (argument)
     49  pool_size_dma += size;   in dma_atomic_pool_size_add()
     51  pool_size_dma32 += size;   in dma_atomic_pool_size_add()
     53  pool_size_kernel += size;   in dma_atomic_pool_size_add()
     58  unsigned long size;   in cma_in_zone()  (local)
     66  size = cma_get_size(cma);   in cma_in_zone()
     67  if (!size)   in cma_in_zone()
     71  end = cma_get_base(cma) + size - 1;   in cma_in_zone()
    240  static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,   in __dma_alloc_from_pool()  (argument)
    247  addr = gen_pool_alloc(pool, size);   in __dma_alloc_from_pool()
    [all …]
|
| D | contiguous.c |
    253  dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)   in dma_contiguous_early_fixup()  (argument)
    274  int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,   in dma_contiguous_reserve_area()  (argument)
    281  ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,   in dma_contiguous_reserve_area()
    334  static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)   in cma_alloc_aligned()  (argument)
    336  unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);   in cma_alloc_aligned()
    338  return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);   in cma_alloc_aligned()
    356  struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)   in dma_alloc_contiguous()  (argument)
    366  return cma_alloc_aligned(dev->cma_area, size, gfp);   in dma_alloc_contiguous()
    367  if (size <= PAGE_SIZE)   in dma_alloc_contiguous()
    376  page = cma_alloc_aligned(cma, size, gfp);   in dma_alloc_contiguous()
    [all …]
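dma_alloc_contiguous() and dma_free_contiguous() are kernel-internal helpers for DMA ops implementations, not a general driver API. A sketch of the usual pattern (try CMA, fall back to the buddy allocator), mirroring dma_common_alloc_pages() above; the function name is hypothetical:

```c
/*
 * Sketch: how a dma_map_ops-style allocator might obtain physically
 * contiguous pages. dma_alloc_contiguous() returns a page (or NULL when
 * there is no suitable CMA area); the caller falls back to alloc_pages().
 */
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>

static struct page *example_alloc_pages(struct device *dev, size_t size,
					gfp_t gfp)	/* hypothetical */
{
	struct page *page;

	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)	/* no CMA area, or the CMA allocation failed */
		page = alloc_pages(gfp, get_order(size));
	return page;
}
```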
|
| /kernel/kcsan/ |
| D | core.c |
    115  size_t size,   in find_watchpoint()  (argument)
    140  if (matching_access(wp_addr_masked, wp_size, addr_masked, size))   in find_watchpoint()
    148  insert_watchpoint(unsigned long addr, size_t size, bool is_write)   in insert_watchpoint()  (argument)
    151  const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);   in insert_watchpoint()
    210  check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
    223  check_access(scoped_access->ptr, scoped_access->size,   in kcsan_check_scoped_accesses()
    231  is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)   in is_atomic()  (argument)
    245  (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&   in is_atomic()
    246  !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))   in is_atomic()
    268  should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)   in should_watch()  (argument)
    [all …]
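check_access() is KCSAN's internal entry point; most code reaches it only through compiler instrumentation, but intent can also be declared explicitly. A sketch using ASSERT_EXCLUSIVE_WRITER() from <linux/kcsan-checks.h>; the struct and its locking rule are assumptions:

```c
/*
 * Sketch: declare that this update is supposed to be the only writer of
 * c->hits (e.g. because the caller holds an assumed counters_lock). KCSAN
 * then reports any concurrent writer as a data race.
 */
#include <linux/kcsan-checks.h>

struct counters {
	long hits;	/* assumed: written only under counters_lock */
};

static void counters_update(struct counters *c)
{
	ASSERT_EXCLUSIVE_WRITER(c->hits);	/* flag racing writers */
	c->hits++;
}
```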
|
| D | report.c |
     31  size_t size;   (member)
    447  other_info->ai.size, get_thread_desc(other_info->ai.task_pid),   in print_report()
    459  get_access_type(ai->access_type), ai->ptr, ai->size,   in print_report()
    463  get_access_type(ai->access_type), ai->ptr, ai->size,   in print_report()
    472  if (ai->size <= 8) {   in print_report()
    473  int hex_len = ai->size * 2;   in print_report()
    504  other_info->ai.size = 0;   in release_report()
    565  } while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&   in set_other_info_task_blocking()
    591  WARN_ON(other_info->ai.size);   in prepare_report_producer()
    609  while (!other_info->ai.size) { /* Await valid @other_info. */   in prepare_report_consumer()
    [all …]
|
| /kernel/ |
| D | stacktrace.c |
     47  int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,   in stack_trace_snprint()  (argument)
     55  for (i = 0; i < nr_entries && size; i++) {   in stack_trace_snprint()
     56  generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',   in stack_trace_snprint()
     60  if (generated >= size) {   in stack_trace_snprint()
     61  buf += size;   in stack_trace_snprint()
     62  size = 0;   in stack_trace_snprint()
     65  size -= generated;   in stack_trace_snprint()
     77  unsigned int size;   (member)
     86  if (c->len >= c->size)   in stack_trace_consume_entry()
     94  return c->len < c->size;   in stack_trace_consume_entry()
    [all …]
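stack_trace_save() and stack_trace_snprint() are the public halves of this file: one captures return addresses, the other renders them with %pS. A sketch; the wrapper function is hypothetical:

```c
/*
 * Sketch: capture the current call chain and format it into a buffer.
 * stack_trace_save() stores at most ARRAY_SIZE(entries) addresses (skipping
 * 0 extra frames) and returns how many it saved; stack_trace_snprint()
 * prints one symbolized entry per line.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void example_dump_stack_to_buf(char *buf, size_t buflen)	/* hypothetical */
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	stack_trace_snprint(buf, buflen, entries, nr, 0);
}
```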
|
| D | iomem.c |
      9  static void *arch_memremap_wb(resource_size_t offset, unsigned long size)   in arch_memremap_wb()  (argument)
     12  return (__force void *)ioremap_cache(offset, size);   in arch_memremap_wb()
     14  return (__force void *)ioremap(offset, size);   in arch_memremap_wb()
     20  static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,   in arch_memremap_can_ram_remap()  (argument)
     27  static void *try_ram_remap(resource_size_t offset, size_t size,   in try_ram_remap()  (argument)
     34  arch_memremap_can_ram_remap(offset, size, flags))   in try_ram_remap()
     68  void *memremap(resource_size_t offset, size_t size, unsigned long flags)   in memremap()  (argument)
     70  int is_ram = region_intersects(offset, size,   in memremap()
     79  &offset, (unsigned long) size);   in memremap()
     92  addr = try_ram_remap(offset, size, flags);   in memremap()
    [all …]
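memremap() is the driver-facing API here. A sketch of mapping a firmware-described table that may live in System RAM; the physical address and size are placeholders:

```c
/*
 * Sketch: map a physical range for CPU access. memremap() returns an
 * ordinary (non-__iomem) pointer; MEMREMAP_WB asks for a write-back
 * cacheable mapping and may reuse the linear map when the range is plain
 * System RAM. The caller undoes the mapping with memunmap().
 */
#include <linux/io.h>
#include <linux/sizes.h>

static void *example_map_table(void)	/* hypothetical */
{
	void *virt = memremap(0x80000000, SZ_64K, MEMREMAP_WB);	/* placeholder address */

	if (!virt)
		return NULL;
	/* ... parse the table ... */
	return virt;	/* caller later calls memunmap(virt) */
}
```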
|
| D | regset.c |
      8  unsigned int size,   in __regset_get()  (argument)
     16  if (size > regset->n * regset->size)   in __regset_get()
     17  size = regset->n * regset->size;   in __regset_get()
     19  to_free = p = kvzalloc(size, GFP_KERNEL);   in __regset_get()
     24  (struct membuf){.p = p, .left = size});   in __regset_get()
     30  return size - res;   in __regset_get()
     35  unsigned int size,   in regset_get()  (argument)
     38  return __regset_get(target, regset, size, &data);   in regset_get()
     44  unsigned int size,   in regset_get_alloc()  (argument)
     48  return __regset_get(target, regset, size, data);   in regset_get_alloc()
    [all …]
|
| D | kcov.c |
     60  unsigned int size;   (member)
     78  unsigned int size;   (member)
    135  static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)   in kcov_remote_area_get()  (argument)
    142  if (area->size == size) {   in kcov_remote_area_get()
    152  unsigned int size)   in kcov_remote_area_put()  (argument)
    155  area->size = size;   in kcov_remote_area_put()
    331  u64 size = cases[1];   in __sanitizer_cov_trace_switch()  (local)
    334  switch (size) {   in __sanitizer_cov_trace_switch()
    357  unsigned int size, void *area, enum kcov_mode mode,   in kcov_start()  (argument)
    360  kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);   in kcov_start()
    [all …]
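kcov is driven from user space through ioctls and an mmap'd coverage buffer. A sketch condensed from the usual KCOV recipe; the debugfs path and buffer size below are the conventional choices, not requirements:

```c
/*
 * Sketch (userspace): size the coverage buffer with KCOV_INIT_TRACE, mmap
 * it, enable PC tracing, run the code of interest, and read cover[0], which
 * holds the number of PCs recorded so far.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kcov.h>

#define COVER_SIZE (64 << 10)	/* unsigned long slots, conventional value */

int main(void)
{
	unsigned long *cover;
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		return 1;
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		return 1;
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

	/* ... issue the syscall(s) to be traced ... */

	printf("%lu PCs collected\n", __atomic_load_n(&cover[0], __ATOMIC_RELAXED));
	ioctl(fd, KCOV_DISABLE, 0);
	return 0;
}
```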
|
| D | resource.c |
    223  resource_size_t size;   in __release_child_resources()  (local)
    237  size = resource_size(tmp);   in __release_child_resources()
    239  tmp->end = size - 1;   in __release_child_resources()
    537  size_t size, unsigned long flags,   in __region_intersects()  (argument)
    547  res.end = start + size - 1;   in __region_intersects()
    627  int region_intersects(resource_size_t start, size_t size, unsigned long flags,   in region_intersects()  (argument)
    633  ret = __region_intersects(&iomem_resource, start, size, flags, desc);   in region_intersects()
    658  struct resource *new, resource_size_t size,   in __find_resource_space()  (argument)
    694  &avail, size, constraint->align);   in __find_resource_space()
    698  alloc.end = alloc.start + size - 1;   in __find_resource_space()
    [all …]
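region_intersects() is the query that memremap() relies on (see iomem.c above) to decide whether a physical range is System RAM. A sketch; the exact header carrying the declaration varies by kernel version, so the includes below are an assumption:

```c
/*
 * Sketch: classify a physical range against the iomem resource tree.
 * region_intersects() answers REGION_INTERSECTS, REGION_DISJOINT or
 * REGION_MIXED; here we only accept a range that lies fully in System RAM.
 */
#include <linux/ioport.h>	/* IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE */
#include <linux/mm.h>		/* region_intersects() declaration (assumed location) */

static bool example_range_is_ram(resource_size_t start, size_t size)
{
	return region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				 IORES_DESC_NONE) == REGION_INTERSECTS;
}
```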
|
| /kernel/module/ |
| D | stats.c |
    277  unsigned int len, size, count_failed = 0;   in read_file_mod_stats()  (local)
    299  size = MAX_PREAMBLE + min((unsigned int)(floads + fbecoming),   in read_file_mod_stats()
    301  buf = kzalloc(size, GFP_KERNEL);   in read_file_mod_stats()
    306  len = scnprintf(buf, size, "%25s\t%u\n", "Mods ever loaded", live_mod_count);   in read_file_mod_stats()
    308  len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on kread", fkreads);   in read_file_mod_stats()
    310  len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on decompress",   in read_file_mod_stats()
    312  len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on becoming", fbecoming);   in read_file_mod_stats()
    314  len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on load", floads);   in read_file_mod_stats()
    316  len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Total module size", total_size);   in read_file_mod_stats()
    317  len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Total mod text size", text_size);   in read_file_mod_stats()
    [all …]
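read_file_mod_stats() builds its report with the standard scnprintf() append idiom. A sketch of the same pattern with made-up labels and values:

```c
/*
 * Sketch: unlike snprintf(), scnprintf() returns the number of bytes it
 * actually wrote (never more than size - 1), so "len" can be accumulated
 * safely; once the buffer is full, "size - len" reaches 0 and further calls
 * write nothing.
 */
#include <linux/kernel.h>

static size_t example_fill_stats(char *buf, size_t size)	/* hypothetical */
{
	size_t len = 0;

	len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods ever loaded", 3u);
	len += scnprintf(buf + len, size - len, "%25s\t%u\n", "Mods failed on load", 0u);
	return len;
}
```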
|
| D | decompress.c |
     63  static size_t module_gzip_header_len(const u8 *buf, size_t size)   in module_gzip_header_len()  (argument)
     68  if (size < len || memcmp(buf, signature, sizeof(signature)))   in module_gzip_header_len()
     77  if (len == size)   in module_gzip_header_len()
     86  const void *buf, size_t size)   in module_gzip_decompress()  (argument)
     94  gzip_hdr_len = module_gzip_header_len(buf, size);   in module_gzip_decompress()
    101  s.avail_in = size - gzip_hdr_len;   in module_gzip_decompress()
    150  const void *buf, size_t size)   in module_xz_decompress()  (argument)
    159  if (size < sizeof(signature) ||   in module_xz_decompress()
    169  xz_buf.in_size = size;   in module_xz_decompress()
    208  const void *buf, size_t size)   in module_zstd_decompress()  (argument)
    [all …]
|
| D | livepatch.c |
     20  unsigned int size, symndx;   in copy_module_elf()  (local)
     23  size = sizeof(*mod->klp_info);   in copy_module_elf()
     24  mod->klp_info = kmalloc(size, GFP_KERNEL);   in copy_module_elf()
     29  size = sizeof(mod->klp_info->hdr);   in copy_module_elf()
     30  memcpy(&mod->klp_info->hdr, info->hdr, size);   in copy_module_elf()
     33  size = sizeof(*info->sechdrs) * info->hdr->e_shnum;   in copy_module_elf()
     34  mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL);   in copy_module_elf()
     41  size = info->sechdrs[info->hdr->e_shstrndx].sh_size;   in copy_module_elf()
     42  mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL);   in copy_module_elf()
|
| /kernel/events/ |
| D | ring_buffer.c |
    143  unsigned long data_size, unsigned int size,   in ring_buffer_has_space()  (argument)
    147  return CIRC_SPACE(head, tail, data_size) >= size;   in ring_buffer_has_space()
    149  return CIRC_SPACE(tail, head, data_size) >= size;   in ring_buffer_has_space()
    155  struct perf_event *event, unsigned int size,   in __perf_output_begin()  (argument)
    191  size += sizeof(lost_event);   in __perf_output_begin()
    193  size += event->id_header_size;   in __perf_output_begin()
    205  size, backward)))   in __perf_output_begin()
    222  head += size;   in __perf_output_begin()
    224  head -= size;   in __perf_output_begin()
    245  handle->size = (1UL << page_shift) - offset;   in __perf_output_begin()
    [all …]
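ring_buffer_has_space() is a thin wrapper over CIRC_SPACE() from <linux/circ_buf.h>. A sketch restating the check outside the perf internals:

```c
/*
 * Sketch: for a power-of-two circular buffer, CIRC_SPACE(head, tail, size)
 * is the number of bytes that can still be written without overrunning
 * unread data. __perf_output_begin() refuses a record unless the requested
 * record size fits (the arguments swap for backward-writing buffers).
 */
#include <linux/circ_buf.h>

static bool example_has_space(unsigned long head, unsigned long tail,
			      unsigned long data_size, unsigned int rec_size)
{
	return CIRC_SPACE(head, tail, data_size) >= rec_size;
}
```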
|
| D | internal.h |
     96  unsigned long size, u64 flags);
    138  unsigned long size, written; \
    141  size = min(handle->size, len); \
    143  written = size - written; \
    149  handle->size -= written; \
    150  if (!handle->size) { \
    156  handle->size = PAGE_SIZE << page_order(rb); \
    158  } while (len && written == size); \
    167  __DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
    175  orig_len - len, size)   in __output_custom()
|
| /kernel/trace/ |
| D | trace_events_inject.c |
    142  int size = 0;   in trace_get_entry_size()  (local)
    146  if (field->size + field->offset > size)   in trace_get_entry_size()
    147  size = field->size + field->offset;   in trace_get_entry_size()
    150  return size;   in trace_get_entry_size()
    153  static void *trace_alloc_entry(struct trace_event_call *call, int *size)   in trace_alloc_entry()  (argument)
    177  str_loc -= field->offset + field->size;   in trace_alloc_entry()
    189  *size = entry_size + 1;   in trace_alloc_entry()
    220  strscpy(entry + field->offset, addr, field->size);   in parse_entry()
    238  str_loc -= field->offset + field->size;   in parse_entry()
    247  switch (field->size) {   in parse_entry()
    [all …]
|
| /kernel/debug/kdb/ |
| D | kdb_support.c |
    266  int kdb_getarea_size(void *res, unsigned long addr, size_t size)   in kdb_getarea_size()  (argument)
    268  int ret = copy_from_kernel_nofault((char *)res, (char *)addr, size);   in kdb_getarea_size()
    291  int kdb_putarea_size(unsigned long addr, void *res, size_t size)   in kdb_putarea_size()  (argument)
    293  int ret = copy_to_kernel_nofault((char *)addr, (char *)res, size);   in kdb_putarea_size()
    317  static int kdb_getphys(void *res, unsigned long addr, size_t size)   in kdb_getphys()  (argument)
    328  memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);   in kdb_getphys()
    343  int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)   in kdb_getphysword()  (argument)
    352  switch (size) {   in kdb_getphysword()
    369  if (size <= sizeof(*word)) {   in kdb_getphysword()
    378  kdb_func_printf("bad width %zu\n", size);   in kdb_getphysword()
    [all …]
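kdb_getarea_size() wraps copy_from_kernel_nofault() so that a bad address returns a diagnostic instead of faulting the debugger. A sketch of a hypothetical kdb command handler; kdb_getarea() is assumed to be the sizeof-inferring wrapper macro from the internal kdb_private.h header:

```c
/*
 * Sketch (hypothetical kdb command): read one word of kernel memory
 * through the fault-tolerant accessor and print it. The address is a
 * placeholder; a real command would parse it from argv.
 */
#include <linux/kdb.h>
/* kdb_getarea() comes from kdb_private.h (internal header, assumed). */

static int example_kdb_peek(int argc, const char **argv)
{
	unsigned long addr = 0xffffffff81000000UL;	/* placeholder */
	unsigned long word;
	int diag;

	diag = kdb_getarea(word, addr);		/* copies sizeof(word) bytes */
	if (diag)
		return diag;			/* e.g. bad-address diagnostic */
	kdb_printf("0x%lx: 0x%lx\n", addr, word);
	return 0;
}
```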
|
| /kernel/bpf/ |
| D | local_storage.c |
    188  u32 size;   in bpf_percpu_cgroup_storage_copy()  (local)
    201  size = round_up(_map->value_size, 8);   in bpf_percpu_cgroup_storage_copy()
    204  per_cpu_ptr(storage->percpu_buf, cpu), size);   in bpf_percpu_cgroup_storage_copy()
    205  off += size;   in bpf_percpu_cgroup_storage_copy()
    217  u32 size;   in bpf_percpu_cgroup_storage_update()  (local)
    235  size = round_up(_map->value_size, 8);   in bpf_percpu_cgroup_storage_update()
    238  value + off, size);   in bpf_percpu_cgroup_storage_update()
    239  off += size;   in bpf_percpu_cgroup_storage_update()
    363  u32 offset, size;   in cgroup_storage_check_btf()  (local)
    384  size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);   in cgroup_storage_check_btf()
    [all …]
|
| D | queue_stack_maps.c |
     20  u32 size; /* max_entries + 1 */   (member)
     39  if (unlikely(head >= qs->size))   in queue_stack_map_is_full()
     68  u64 size, queue_size;   in queue_stack_map_alloc()  (local)
     70  size = (u64) attr->max_entries + 1;   in queue_stack_map_alloc()
     71  queue_size = sizeof(*qs) + size * attr->value_size;   in queue_stack_map_alloc()
     79  qs->size = size;   in queue_stack_map_alloc()
    118  if (unlikely(++qs->tail >= qs->size))   in __queue_map_get()
    150  if (unlikely(index >= qs->size))   in __stack_map_get()
    151  index = qs->size - 1;   in __stack_map_get()
    219  if (unlikely(++qs->tail >= qs->size))   in queue_stack_map_push_elem()
    [all …]
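BPF_MAP_TYPE_QUEUE keeps max_entries + 1 slots in its backing array, as the size member above notes. A user-space sketch using libbpf (assumes a reasonably recent libbpf that provides bpf_map_create()); push and pop go through the update and lookup_and_delete commands with a NULL key:

```c
/*
 * Sketch (userspace, libbpf): create a FIFO queue map with u32 values,
 * push one element and pop it back. Queue/stack maps have no key, so the
 * key argument is NULL and key_size is 0.
 */
#include <stdio.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

int main(void)
{
	__u32 val = 42, out = 0;
	int fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "example_q",
				0 /* key_size */, sizeof(__u32),
				64 /* max_entries */, NULL);

	if (fd < 0)
		return 1;
	bpf_map_update_elem(fd, NULL, &val, 0);		/* push */
	bpf_map_lookup_and_delete_elem(fd, NULL, &out);	/* pop  */
	printf("popped %u\n", out);
	return 0;
}
```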
|