
Searched refs: size (Results 1 – 25 of 106), sorted by relevance


/kernel/
stacktrace.c
46 int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, in stack_trace_snprint() argument
54 for (i = 0; i < nr_entries && size; i++) { in stack_trace_snprint()
55 generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ', in stack_trace_snprint()
59 if (generated >= size) { in stack_trace_snprint()
60 buf += size; in stack_trace_snprint()
61 size = 0; in stack_trace_snprint()
64 size -= generated; in stack_trace_snprint()
76 unsigned int size; member
86 if (c->len >= c->size) in stack_trace_consume_entry()
94 return c->len < c->size; in stack_trace_consume_entry()
[all …]
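
The stack_trace_snprint() loop above shows the standard pattern for filling a fixed-size buffer with snprintf(): when the return value reaches or exceeds the remaining space, the output was truncated, so the cursor jumps to the end and the remaining size is clamped to zero. A minimal userspace sketch of the same pattern (plain strings instead of the kernel's %pS symbol formatting):

    #include <stdio.h>

    /* Append lines to buf; stop cleanly once the buffer fills up.
     * Returns the length the full output would have needed. */
    static int fill_lines(char *buf, size_t size, const char **lines, int n)
    {
        int i, total = 0;

        for (i = 0; i < n && size; i++) {
            int generated = snprintf(buf, size, "  %s\n", lines[i]);

            total += generated;
            if ((size_t)generated >= size) { /* truncated */
                buf += size;
                size = 0;
            } else {
                buf += generated;
                size -= generated;
            }
        }
        return total;
    }

    int main(void)
    {
        const char *frames[] = { "func_a+0x10", "func_b+0x24", "func_c+0x8" };
        char buf[32];

        fill_lines(buf, sizeof(buf), frames, 3);
        fputs(buf, stdout);
        return 0;
    }
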
iomem.c
9 __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size) in ioremap_cache() argument
11 return ioremap(offset, size); in ioremap_cache()
16 static void *arch_memremap_wb(resource_size_t offset, unsigned long size) in arch_memremap_wb() argument
18 return (__force void *)ioremap_cache(offset, size); in arch_memremap_wb()
23 static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, in arch_memremap_can_ram_remap() argument
30 static void *try_ram_remap(resource_size_t offset, size_t size, in try_ram_remap() argument
37 arch_memremap_can_ram_remap(offset, size, flags)) in try_ram_remap()
71 void *memremap(resource_size_t offset, size_t size, unsigned long flags) in memremap() argument
73 int is_ram = region_intersects(offset, size, in memremap()
82 &offset, (unsigned long) size); in memremap()
[all …]
kcov.c
58 unsigned int size; member
76 unsigned int size; member
117 static struct kcov_remote_area *kcov_remote_area_get(unsigned int size) in kcov_remote_area_get() argument
122 kcov_debug("size = %u\n", size); in kcov_remote_area_get()
125 if (area->size == size) { in kcov_remote_area_get()
137 unsigned int size) in kcov_remote_area_put() argument
139 kcov_debug("area = %px, size = %u\n", area, size); in kcov_remote_area_put()
141 area->size = size; in kcov_remote_area_put()
290 u64 size = cases[1]; in __sanitizer_cov_trace_switch() local
293 switch (size) { in __sanitizer_cov_trace_switch()
[all …]
resource.c
245 resource_size_t size; in __release_child_resources() local
259 size = resource_size(tmp); in __release_child_resources()
261 tmp->end = size - 1; in __release_child_resources()
536 int region_intersects(resource_size_t start, size_t size, unsigned long flags, in region_intersects() argument
544 res.end = start + size - 1; in region_intersects()
573 resource_size_t size, in simple_align_resource() argument
594 resource_size_t size, in __find_resource() argument
628 size, constraint->align); in __find_resource()
629 alloc.end = alloc.start + size - 1; in __find_resource()
652 resource_size_t size, in find_resource() argument
[all …]
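
region_intersects() converts its (start, size) pair into an inclusive range with end = start + size - 1, the same closed-interval convention struct resource uses. A small sketch of that overlap test; the type and function names here are mine, not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t start, end; }; /* end is inclusive */

    static bool ranges_overlap(struct range a, struct range b)
    {
        return a.start <= b.end && b.start <= a.end;
    }

    int main(void)
    {
        struct range res = { 0x1000, 0x1fff };
        /* a (start, size) request, converted as in region_intersects() */
        struct range req = { 0x1800, 0x1800 + 0x1000 - 1 };

        printf("overlap: %d\n", ranges_overlap(res, req)); /* 1 */
        return 0;
    }
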
module.c
116 return (unsigned long)layout->size; in __mod_tree_size()
178 if (mod->init_layout.size) in mod_tree_insert()
184 if (mod->init_layout.size) in mod_tree_remove_init()
231 static void __mod_update_bounds(void *base, unsigned int size) in __mod_update_bounds() argument
234 unsigned long max = min + size; in __mod_update_bounds()
244 __mod_update_bounds(mod->core_layout.base, mod->core_layout.size); in mod_update_bounds()
245 if (mod->init_layout.size) in mod_update_bounds()
246 __mod_update_bounds(mod->init_layout.base, mod->init_layout.size); in mod_update_bounds()
677 const void *from, unsigned long size) in percpu_modcopy() argument
682 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); in percpu_modcopy()
[all …]
taskstats.c
70 size_t size) in prepare_reply() argument
78 skb = genlmsg_new(size, GFP_KERNEL); in prepare_reply()
395 size_t size; in cgroupstats_user_cmd() local
408 size = nla_total_size(sizeof(struct cgroupstats)); in cgroupstats_user_cmd()
411 size); in cgroupstats_user_cmd()
473 size_t size; in taskstats_packet_size() local
475 size = nla_total_size(sizeof(u32)) + in taskstats_packet_size()
479 return size; in taskstats_packet_size()
486 size_t size; in cmd_attr_pid() local
490 size = taskstats_packet_size(); in cmd_attr_pid()
[all …]
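
prepare_reply() and taskstats_packet_size() size netlink messages with nla_total_size(), which pads each attribute's header plus payload up to 4-byte (NLA_ALIGNTO) alignment. A hedged re-derivation of that arithmetic, with the constants copied from the netlink uapi and the helper name my own:

    #include <stdio.h>

    #define NLA_ALIGNTO  4
    #define NLA_ALIGN(n) (((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN   NLA_ALIGN(4) /* sizeof(struct nlattr) is 4 */

    /* hypothetical stand-in for the kernel's nla_total_size() */
    static size_t attr_total_size(size_t payload)
    {
        return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
        /* a u32 attribute: 4-byte header + 4-byte payload, already aligned */
        printf("u32 attr:    %zu\n", attr_total_size(4));  /* 8 */
        /* a 6-byte payload pads up: 4 + 6 = 10 -> 12 */
        printf("6-byte attr: %zu\n", attr_total_size(6));  /* 12 */
        return 0;
    }
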
/kernel/dma/
direct.c
26 static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size) in report_addr() argument
33 &dma_addr, size, *dev->dma_mask, dev->bus_dma_mask); in report_addr()
79 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) in dma_coherent_ok() argument
81 return phys_to_dma_direct(dev, phys) + size - 1 <= in dma_coherent_ok()
85 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, in __dma_direct_alloc_pages() argument
88 size_t alloc_size = PAGE_ALIGN(size); in __dma_direct_alloc_pages()
101 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
108 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
109 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
128 void *dma_direct_alloc_pages(struct device *dev, size_t size, in dma_direct_alloc_pages() argument
[all …]
remap.c
24 size_t size, pgprot_t prot, const void *caller) in __dma_common_pages_remap() argument
28 area = get_vm_area_caller(size, VM_DMA_COHERENT, caller); in __dma_common_pages_remap()
44 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap() argument
49 area = __dma_common_pages_remap(pages, size, prot, caller); in dma_common_pages_remap()
62 void *dma_common_contiguous_remap(struct page *page, size_t size, in dma_common_contiguous_remap() argument
69 pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); in dma_common_contiguous_remap()
73 for (i = 0; i < (size >> PAGE_SHIFT); i++) in dma_common_contiguous_remap()
76 area = __dma_common_pages_remap(pages, size, prot, caller); in dma_common_contiguous_remap()
88 void dma_common_free_remap(void *cpu_addr, size_t size) in dma_common_free_remap() argument
97 unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size)); in dma_common_free_remap()
[all …]
mapping.c
22 size_t size; member
32 dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, in dmam_release()
41 WARN_ON(this->size != match->size || in dmam_match()
57 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, in dmam_free_coherent() argument
60 struct dma_devres match_data = { size, vaddr, dma_handle }; in dmam_free_coherent()
62 dma_free_coherent(dev, size, vaddr, dma_handle); in dmam_free_coherent()
81 void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, in dmam_alloc_attrs() argument
91 vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); in dmam_alloc_attrs()
99 dr->size = size; in dmam_alloc_attrs()
112 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_common_get_sgtable() argument
[all …]
coherent.c
16 int size; member
41 dma_addr_t device_addr, size_t size, in dma_init_coherent_memory() argument
46 int pages = size >> PAGE_SHIFT; in dma_init_coherent_memory()
50 if (!size) { in dma_init_coherent_memory()
55 mem_base = memremap(phys_addr, size, MEMREMAP_WC); in dma_init_coherent_memory()
74 dma_mem->size = pages; in dma_init_coherent_memory()
111 dma_addr_t device_addr, size_t size) in dma_declare_coherent_memory() argument
116 ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem); in dma_declare_coherent_memory()
128 ssize_t size, dma_addr_t *dma_handle) in __dma_alloc_from_coherent() argument
130 int order = get_order(size); in __dma_alloc_from_coherent()
[all …]
debug.c
74 u64 size; member
279 ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) in containing_match()
311 entry->size == ref->size ? ++match_lvl : 0; in __hash_bucket_find()
416 entry->dev_addr, entry->size, in debug_dma_dump_mappings()
836 entry->dev_addr, entry->size, in dump_show()
901 count, entry->dev_addr, entry->size, in dma_debug_device_change()
1016 ref->dev_addr, ref->size); in check_unmap()
1021 if (ref->size != entry->size) { in check_unmap()
1026 ref->dev_addr, entry->size, ref->size); in check_unmap()
1034 ref->dev_addr, ref->size, in check_unmap()
[all …]
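
containing_match() above treats two dma-debug entries as related when one [dev_addr, dev_addr + size) window fully contains the other. The same containment test as a standalone sketch (struct layout simplified; the dev comparison the kernel also performs is omitted):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mapping { uint64_t dev_addr, size; };

    /* true when mapping a lies entirely inside mapping b */
    static bool containing_match(const struct mapping *a, const struct mapping *b)
    {
        return b->dev_addr <= a->dev_addr &&
               (b->dev_addr + b->size) >= (a->dev_addr + a->size);
    }

    int main(void)
    {
        struct mapping whole = { 0x1000, 0x1000 };
        struct mapping part  = { 0x1200, 0x0100 };

        printf("%d\n", containing_match(&part, &whole)); /* 1 */
        return 0;
    }
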
contiguous.c
162 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, in dma_contiguous_reserve_area() argument
168 ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, in dma_contiguous_reserve_area()
231 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) in dma_alloc_contiguous() argument
233 size_t count = size >> PAGE_SHIFT; in dma_alloc_contiguous()
244 size_t align = get_order(size); in dma_alloc_contiguous()
264 void dma_free_contiguous(struct device *dev, struct page *page, size_t size) in dma_free_contiguous() argument
267 PAGE_ALIGN(size) >> PAGE_SHIFT)) in dma_free_contiguous()
268 __free_pages(page, get_order(size)); in dma_free_contiguous()
311 if ((rmem->base & mask) || (rmem->size & mask)) { in rmem_cma_setup()
316 err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma); in rmem_cma_setup()
[all …]
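
dma_alloc_contiguous() turns a byte count into pages (size >> PAGE_SHIFT) and into an allocation order via get_order(). A userspace sketch of get_order() under the usual 4 KiB page assumption; size must be nonzero:

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumption: 4 KiB pages */

    /* smallest order such that (4096 << order) >= size; size must be > 0 */
    static int get_order(unsigned long size)
    {
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        printf("pages(20480) = %lu\n", 20480UL >> PAGE_SHIFT); /* 5 */
        printf("order(4096)  = %d\n", get_order(4096));        /* 0 */
        printf("order(4097)  = %d\n", get_order(4097));        /* 1 */
        printf("order(65536) = %d\n", get_order(65536));       /* 4 */
        return 0;
    }
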
swiotlb.c
158 unsigned long size; in swiotlb_size_or_default() local
160 size = io_tlb_nslabs << IO_TLB_SHIFT; in swiotlb_size_or_default()
162 return size ? size : (IO_TLB_DEFAULT_SIZE); in swiotlb_size_or_default()
409 size_t size, enum dma_data_direction dir) in swiotlb_bounce() argument
421 while (size) { in swiotlb_bounce()
422 sz = min_t(size_t, PAGE_SIZE - offset, size); in swiotlb_bounce()
433 size -= sz; in swiotlb_bounce()
439 memcpy(vaddr, phys_to_virt(orig_addr), size); in swiotlb_bounce()
441 memcpy(phys_to_virt(orig_addr), vaddr, size); in swiotlb_bounce()
628 size_t size, enum dma_data_direction dir, in swiotlb_tbl_sync_single() argument
[all …]
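
swiotlb_bounce() copies in chunks of min(PAGE_SIZE - offset, size) so that no single memcpy crosses a page boundary of the (possibly highmem) original buffer. The chunking logic in isolation, as a sketch:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static void walk_chunks(unsigned long offset, unsigned long size)
    {
        while (size) {
            unsigned long sz = PAGE_SIZE - offset;

            if (sz > size) /* the kernel's min_t(size_t, ...) */
                sz = size;
            printf("copy %lu bytes\n", sz);
            size -= sz;
            offset = 0; /* later chunks start page-aligned */
        }
    }

    int main(void)
    {
        walk_chunks(0xf00, 0x1400); /* 256, then 4096, then 768 */
        return 0;
    }
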
virt.c
10 static void *dma_virt_alloc(struct device *dev, size_t size, in dma_virt_alloc() argument
16 ret = (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size)); in dma_virt_alloc()
22 static void dma_virt_free(struct device *dev, size_t size, in dma_virt_free() argument
26 free_pages((unsigned long)cpu_addr, get_order(size)); in dma_virt_free()
30 unsigned long offset, size_t size, in dma_virt_map_page() argument
/kernel/debug/kdb/
kdb_support.c
326 int kdb_getarea_size(void *res, unsigned long addr, size_t size) in kdb_getarea_size() argument
328 int ret = probe_kernel_read((char *)res, (char *)addr, size); in kdb_getarea_size()
351 int kdb_putarea_size(unsigned long addr, void *res, size_t size) in kdb_putarea_size() argument
353 int ret = probe_kernel_write((char *)addr, (char *)res, size); in kdb_putarea_size()
377 static int kdb_getphys(void *res, unsigned long addr, size_t size) in kdb_getphys() argument
388 memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size); in kdb_getphys()
403 int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size) in kdb_getphysword() argument
412 switch (size) { in kdb_getphysword()
429 if (size <= sizeof(*word)) { in kdb_getphysword()
438 kdb_printf("kdb_getphysword: bad width %ld\n", (long) size); in kdb_getphysword()
[all …]
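
kdb_getphysword() dispatches on the requested width, accepting only sizes that fit in an unsigned long. A sketch of the same width switch reading from an ordinary buffer rather than a physical address (all names here are mine):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int read_word(unsigned long *word, const void *src, size_t size)
    {
        switch (size) {
        case 1: { uint8_t v;  memcpy(&v, src, 1); *word = v; return 0; }
        case 2: { uint16_t v; memcpy(&v, src, 2); *word = v; return 0; }
        case 4: { uint32_t v; memcpy(&v, src, 4); *word = v; return 0; }
        case 8:
            if (size <= sizeof(*word)) { /* only when long is 64-bit */
                memcpy(word, src, 8);
                return 0;
            }
            /* fall through */
        default:
            fprintf(stderr, "read_word: bad width %zu\n", size);
            return -1;
        }
    }

    int main(void)
    {
        unsigned long w;
        uint32_t val = 0xdeadbeef;

        if (!read_word(&w, &val, sizeof(val)))
            printf("0x%lx\n", w); /* 0xdeadbeef */
        return 0;
    }
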
/kernel/events/
ring_buffer.c
139 unsigned long data_size, unsigned int size, in ring_buffer_has_space() argument
143 return CIRC_SPACE(head, tail, data_size) >= size; in ring_buffer_has_space()
145 return CIRC_SPACE(tail, head, data_size) >= size; in ring_buffer_has_space()
150 struct perf_event *event, unsigned int size, in __perf_output_begin() argument
184 size += sizeof(lost_event); in __perf_output_begin()
186 size += event->id_header_size; in __perf_output_begin()
197 size, backward))) in __perf_output_begin()
214 head += size; in __perf_output_begin()
216 head -= size; in __perf_output_begin()
237 handle->size = (1UL << page_shift) - offset; in __perf_output_begin()
[all …]
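
ring_buffer_has_space() leans on the CIRC_CNT()/CIRC_SPACE() helpers from include/linux/circ_buf.h, whose masking arithmetic requires a power-of-two size and deliberately leaves one slot unused. Their definitions, exercised in a tiny sketch:

    #include <stdio.h>

    /* from include/linux/circ_buf.h; size must be a power of two */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    int main(void)
    {
        unsigned long head = 300, tail = 100, size = 256;

        printf("used:  %lu\n", CIRC_CNT(head, tail, size));   /* 200 */
        printf("space: %lu\n", CIRC_SPACE(head, tail, size)); /* 55 */
        /* used + space == size - 1: one slot is never filled */
        return 0;
    }
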
internal.h
93 unsigned long size, u64 flags);
130 unsigned long size, written; \
133 size = min(handle->size, len); \
135 written = size - written; \
141 handle->size -= written; \
142 if (!handle->size) { \
148 handle->size = PAGE_SIZE << page_order(rb); \
150 } while (len && written == size); \
159 __DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
167 orig_len - len, size) in __output_custom()
/kernel/bpf/
local_storage.c
177 u32 size; in bpf_percpu_cgroup_storage_copy() local
190 size = round_up(_map->value_size, 8); in bpf_percpu_cgroup_storage_copy()
193 per_cpu_ptr(storage->percpu_buf, cpu), size); in bpf_percpu_cgroup_storage_copy()
194 off += size; in bpf_percpu_cgroup_storage_copy()
207 u32 size; in bpf_percpu_cgroup_storage_update() local
225 size = round_up(_map->value_size, 8); in bpf_percpu_cgroup_storage_update()
228 value + off, size); in bpf_percpu_cgroup_storage_update()
229 off += size; in bpf_percpu_cgroup_storage_update()
339 u32 offset, size; in cgroup_storage_check_btf() local
360 size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id); in cgroup_storage_check_btf()
[all …]
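
bpf_percpu_cgroup_storage_copy() walks the user buffer in strides of round_up(value_size, 8) so each per-CPU chunk stays 8-byte aligned. An equivalent formulation of the kernel's power-of-two round_up(), in a sketch:

    #include <stdio.h>

    /* equivalent to the kernel's round_up() for power-of-two alignment */
    #define round_up(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long value_size = 12, off = 0;
        int cpu;

        /* advance through a flat buffer, one aligned chunk per CPU */
        for (cpu = 0; cpu < 4; cpu++)
            off += round_up(value_size, 8);

        printf("stride %lu, total %lu\n", round_up(value_size, 8), off);
        /* stride 16, total 64 */
        return 0;
    }
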
queue_stack_maps.c
20 u32 size; /* max_entries + 1 */ member
39 if (unlikely(head >= qs->size)) in queue_stack_map_is_full()
72 u64 size, queue_size, cost; in queue_stack_map_alloc() local
74 size = (u64) attr->max_entries + 1; in queue_stack_map_alloc()
75 cost = queue_size = sizeof(*qs) + size * attr->value_size; in queue_stack_map_alloc()
92 qs->size = size; in queue_stack_map_alloc()
133 if (unlikely(++qs->tail >= qs->size)) in __queue_map_get()
160 if (unlikely(index >= qs->size)) in __stack_map_get()
161 index = qs->size - 1; in __stack_map_get()
224 if (unlikely(++qs->tail >= qs->size)) in queue_stack_map_push_elem()
[all …]
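
As the member comment above notes, queue_stack_maps.c allocates max_entries + 1 slots: with head == tail meaning empty, one slot must stay unused so a full ring is distinguishable from an empty one. A minimal sketch of that convention:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ENTRIES 3
    #define RING_SIZE   (MAX_ENTRIES + 1) /* one slot stays unused */

    static unsigned int head, tail;
    static int slots[RING_SIZE];

    static bool is_full(void)
    {
        unsigned int next = head + 1;

        if (next >= RING_SIZE)
            next = 0;
        return next == tail; /* head == tail alone would mean empty */
    }

    static bool push(int v)
    {
        if (is_full())
            return false;
        slots[head] = v;
        if (++head >= RING_SIZE)
            head = 0;
        return true;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 5; i++)
            printf("push %d -> %s\n", i, push(i) ? "ok" : "full");
        return 0;
    }
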
tnum.c
150 struct tnum tnum_cast(struct tnum a, u8 size) in tnum_cast() argument
152 a.value &= (1ULL << (size * 8)) - 1; in tnum_cast()
153 a.mask &= (1ULL << (size * 8)) - 1; in tnum_cast()
157 bool tnum_is_aligned(struct tnum a, u64 size) in tnum_is_aligned() argument
159 if (!size) in tnum_is_aligned()
161 return !((a.value | a.mask) & (size - 1)); in tnum_is_aligned()
172 int tnum_strn(char *str, size_t size, struct tnum a) in tnum_strn() argument
174 return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask); in tnum_strn()
178 int tnum_sbin(char *str, size_t size, struct tnum a) in tnum_sbin() argument
183 if (n < size) { in tnum_sbin()
[all …]
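
tnum_cast() truncates both halves of a tnum to the low size * 8 bits with the mask (1ULL << (size * 8)) - 1, and tnum_is_aligned() tests value and mask together against size - 1. Both reproduced in a sketch; the size < 8 guard is my addition, since a 64-bit shift by 64 is undefined in C and the kernel relies on its callers avoiding that case:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value, mask; }; /* known bits / unknown bits */

    static struct tnum tnum_cast(struct tnum a, uint8_t size)
    {
        /* guard added here; see the lead-in above */
        uint64_t keep = size < 8 ? (1ULL << (size * 8)) - 1 : ~0ULL;

        a.value &= keep;
        a.mask  &= keep;
        return a;
    }

    static bool tnum_is_aligned(struct tnum a, uint64_t size)
    {
        if (!size)
            return true;
        return !((a.value | a.mask) & (size - 1));
    }

    int main(void)
    {
        struct tnum t = { 0x12345678, 0xff00 }; /* bits 8-15 unknown */
        struct tnum b = tnum_cast(t, 1);

        printf("(%#llx; %#llx)\n", (unsigned long long)b.value,
               (unsigned long long)b.mask);               /* (0x78; 0) */
        printf("4-aligned: %d\n", tnum_is_aligned(t, 4)); /* 1 */
        return 0;
    }
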
/kernel/trace/
bpf_trace.c
141 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr) in BPF_CALL_3() argument
149 ret = probe_kernel_read(dst, unsafe_ptr, size); in BPF_CALL_3()
152 memset(dst, 0, size); in BPF_CALL_3()
167 u32, size) in BPF_CALL_3() argument
190 return probe_user_write(unsafe_ptr, src, size); in BPF_CALL_3()
393 struct bpf_perf_event_value *, buf, u32, size) in BPF_CALL_4() argument
397 if (unlikely(size != sizeof(struct bpf_perf_event_value))) in BPF_CALL_4()
405 memset(buf, 0, size); in BPF_CALL_4()
460 u64, flags, void *, data, u64, size) in BPF_CALL_5() argument
466 .size = size, in BPF_CALL_5()
[all …]
trace_probe_tmpl.h
9 switch (code->size) { in fetch_store_raw()
66 probe_mem_read(void *dest, void *src, size_t size);
68 probe_mem_read_user(void *dest, void *src, size_t size);
119 probe_mem_read(dest, (void *)val + code->offset, code->size); in process_fetch_insn_bottom()
122 probe_mem_read_user(dest, (void *)val + code->offset, code->size); in process_fetch_insn_bottom()
151 dest += s3->size; in process_fetch_insn_bottom()
152 val += s3->size; in process_fetch_insn_bottom()
196 void *dyndata = data + tp->size; in store_trace_args()
238 p += a->type->size; in print_probe_args()
/kernel/printk/
printk.c
568 u32 size; in msg_used_size() local
570 size = sizeof(struct printk_log) + text_len + dict_len; in msg_used_size()
571 *pad_len = (-size) & (LOG_ALIGN - 1); in msg_used_size()
572 size += *pad_len; in msg_used_size()
574 return size; in msg_used_size()
610 u32 size, pad_len; in log_store() local
614 size = msg_used_size(text_len, dict_len, &pad_len); in log_store()
616 if (log_make_free_space(size)) { in log_store()
618 size = truncate_msg(&text_len, &trunc_msg_len, in log_store()
621 if (log_make_free_space(size)) in log_store()
[all …]
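
msg_used_size() computes record padding as (-size) & (LOG_ALIGN - 1), a branch-free round-up to a power-of-two boundary. A sketch of the trick; the LOG_ALIGN value is an assumption, as the kernel derives it from __alignof__(struct printk_log):

    #include <stdio.h>

    /* assumption: 8-byte record alignment */
    #define LOG_ALIGN 8u

    int main(void)
    {
        unsigned int size;

        for (size = 13; size <= 17; size++) {
            /* unsigned negation wraps, so this is well-defined */
            unsigned int pad = (-size) & (LOG_ALIGN - 1);

            printf("size %2u -> pad %u -> total %u\n", size, pad, size + pad);
        }
        return 0;
    }
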
/kernel/sched/
cpudeadline.c
33 if (left_child(idx) >= cp->size) in cpudl_heapify_down()
45 if ((l < cp->size) && dl_time_before(orig_dl, in cpudl_heapify_down()
50 if ((r < cp->size) && dl_time_before(largest_dl, in cpudl_heapify_down()
167 new_cpu = cp->elements[cp->size - 1].cpu; in cpudl_clear()
168 cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl; in cpudl_clear()
170 cp->size--; in cpudl_clear()
201 int new_idx = cp->size++; in cpudl_set()
245 cp->size = 0; in cpudl_init()
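
cpudl_heapify_down() walks a binary max-heap stored in a flat array, with the children of index i at 2i + 1 and 2i + 2 and cp->size bounding the valid entries. A sketch over plain ints rather than deadlines (cpudeadline.c defines equivalent left_child()/right_child() helpers):

    #include <stdio.h>

    static int left_child(int i)  { return (i << 1) + 1; }
    static int right_child(int i) { return (i << 1) + 2; }

    /* push a[idx] down until neither child is larger (max-heap) */
    static void heapify_down(int *a, int size, int idx)
    {
        for (;;) {
            int largest = idx;
            int l = left_child(idx), r = right_child(idx);
            int tmp;

            if (l < size && a[l] > a[largest])
                largest = l;
            if (r < size && a[r] > a[largest])
                largest = r;
            if (largest == idx)
                break;
            tmp = a[idx]; a[idx] = a[largest]; a[largest] = tmp;
            idx = largest;
        }
    }

    int main(void)
    {
        int a[] = { 1, 9, 7, 3, 5 };

        heapify_down(a, 5, 0);
        printf("%d %d %d %d %d\n", a[0], a[1], a[2], a[3], a[4]); /* 9 5 7 3 1 */
        return 0;
    }
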
/kernel/livepatch/
shadow.c
105 size_t size, gfp_t gfp_flags, in __klp_shadow_get_or_alloc() argument
123 new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags); in __klp_shadow_get_or_alloc()
197 size_t size, gfp_t gfp_flags, in klp_shadow_alloc() argument
200 return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, in klp_shadow_alloc()
226 size_t size, gfp_t gfp_flags, in klp_shadow_get_or_alloc() argument
229 return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, in klp_shadow_get_or_alloc()
