Searched refs:size (Results 1 – 25 of 66) sorted by relevance

/kernel/debug/kdb/
kdb_support.c
326 int kdb_getarea_size(void *res, unsigned long addr, size_t size) in kdb_getarea_size() argument
328 int ret = probe_kernel_read((char *)res, (char *)addr, size); in kdb_getarea_size()
351 int kdb_putarea_size(unsigned long addr, void *res, size_t size) in kdb_putarea_size() argument
353 int ret = probe_kernel_write((char *)addr, (char *)res, size); in kdb_putarea_size()
377 static int kdb_getphys(void *res, unsigned long addr, size_t size) in kdb_getphys() argument
388 memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size); in kdb_getphys()
403 int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size) in kdb_getphysword() argument
412 switch (size) { in kdb_getphysword()
429 if (size <= sizeof(*word)) { in kdb_getphysword()
438 kdb_printf("kdb_getphysword: bad width %ld\n", (long) size); in kdb_getphysword()
[all …]
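
The kdb_getphysword() hits above dispatch on the requested width and refuse anything wider than unsigned long. Below is a minimal userspace sketch of that switch-on-size pattern; the names are illustrative and the real helper goes through kdb_getphys() to map the physical page first.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative only: copy a 1/2/4/8-byte value out of a buffer into an
 * unsigned long, rejecting widths that do not fit, the way
 * kdb_getphysword() does for physical memory reads. */
static int get_word(unsigned long *word, const void *src, size_t size)
{
    uint8_t b; uint16_t h; uint32_t w; uint64_t d;

    switch (size) {
    case 1: memcpy(&b, src, 1); *word = b; return 0;
    case 2: memcpy(&h, src, 2); *word = h; return 0;
    case 4: memcpy(&w, src, 4); *word = w; return 0;
    case 8:
        if (size <= sizeof(*word)) {    /* false on 32-bit targets */
            memcpy(&d, src, 8); *word = d; return 0;
        }
        /* fall through */
    default:
        fprintf(stderr, "get_word: bad width %zu\n", size);
        return -1;
    }
}

int main(void)
{
    unsigned long v;
    unsigned int raw = 0xdeadbeef;

    get_word(&v, &raw, sizeof(raw));
    printf("0x%lx\n", v);    /* 0xdeadbeef */
    return 0;
}
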
/kernel/
kcov.c
36 unsigned size; member
128 unsigned long size, off; in kcov_mmap() local
136 size = kcov->size * sizeof(unsigned long); in kcov_mmap()
138 vma->vm_end - vma->vm_start != size) { in kcov_mmap()
146 for (off = 0; off < size; off += PAGE_SIZE) { in kcov_mmap()
182 unsigned long size, unused; in kcov_ioctl_locked() local
197 size = arg; in kcov_ioctl_locked()
198 if (size < 2 || size > INT_MAX / sizeof(unsigned long)) in kcov_ioctl_locked()
200 kcov->size = size; in kcov_ioctl_locked()
219 t->kcov_size = kcov->size; in kcov_ioctl_locked()
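
The kcov hits show a two-step guard: the size ioctl first bounds the entry count so that the later multiplication by sizeof(unsigned long) cannot overflow, and kcov_mmap() then requires the mapping length to equal size * sizeof(unsigned long) exactly. A hedged userspace sketch of that validation (function and parameter names are made up; kcov splits this across its ioctl and mmap handlers):

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative: bound a requested entry count so converting it to bytes
 * cannot overflow, then check a caller-supplied mapping length against it. */
static int check_cover(unsigned long nr_entries, unsigned long vma_len)
{
    unsigned long bytes;

    if (nr_entries < 2 || nr_entries > INT_MAX / sizeof(unsigned long))
        return -1;            /* reject before multiplying */

    bytes = nr_entries * sizeof(unsigned long);
    if (vma_len != bytes)
        return -1;            /* mapping must cover the buffer exactly */
    return 0;
}

int main(void)
{
    printf("%d %d\n",
           check_cover(1024, 1024 * sizeof(unsigned long)),    /* 0 */
           check_cover(3, 64));                                /* -1: length mismatch */
    return 0;
}
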
taskstats.c
80 size_t size) in prepare_reply() argument
88 skb = genlmsg_new(size, GFP_KERNEL); in prepare_reply()
431 size_t size; in cgroupstats_user_cmd() local
444 size = nla_total_size(sizeof(struct cgroupstats)); in cgroupstats_user_cmd()
447 size); in cgroupstats_user_cmd()
509 size_t size; in taskstats_packet_size() local
511 size = nla_total_size(sizeof(u32)) + in taskstats_packet_size()
514 size += nla_total_size(0); /* Padding for alignment */ in taskstats_packet_size()
516 return size; in taskstats_packet_size()
523 size_t size; in cmd_attr_pid() local
[all …]
resource.c
257 resource_size_t size; in __release_child_resources() local
271 size = resource_size(tmp); in __release_child_resources()
273 tmp->end = size - 1; in __release_child_resources()
502 int region_is_ram(resource_size_t start, unsigned long size) in region_is_ram() argument
505 resource_size_t end = start + size - 1; in region_is_ram()
536 resource_size_t size, in simple_align_resource() argument
557 resource_size_t size, in __find_resource() argument
591 size, constraint->align); in __find_resource()
592 alloc.end = alloc.start + size - 1; in __find_resource()
614 resource_size_t size, in find_resource() argument
[all …]
/kernel/events/
ring_buffer.c
105 struct perf_event *event, unsigned int size) in perf_output_begin() argument
135 size += sizeof(lost_event); in perf_output_begin()
137 size += event->id_header_size; in perf_output_begin()
146 unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size)) in perf_output_begin()
161 head += size; in perf_output_begin()
177 handle->size = (1UL << page_shift) - offset; in perf_output_begin()
182 lost_event.header.size = sizeof(lost_event); in perf_output_begin()
279 unsigned long size; in rb_alloc() local
282 size = sizeof(struct ring_buffer); in rb_alloc()
283 size += nr_pages * sizeof(void *); in rb_alloc()
[all …]
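
perf_output_begin() grows the requested size by the pending lost-event record and the ID header, then bails out when CIRC_SPACE() reports less free room than that in the power-of-two data area. A small sketch of the circular-buffer arithmetic that check relies on (macros mirror include/linux/circ_buf.h; no concurrency handling here):

#include <stdio.h>

/* Power-of-two circular buffer accounting: CIRC_CNT is how much data is in
 * the buffer, CIRC_SPACE how much room is left, keeping one slot free so
 * that head == tail always means "empty". */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
    unsigned long size = 4096;                /* must be a power of two */
    unsigned long head = 5000, tail = 2000;   /* free-running counters */

    printf("used  = %lu\n", CIRC_CNT(head, tail, size));    /* 3000 */
    printf("space = %lu\n", CIRC_SPACE(head, tail, size));  /* 1095 */
    return 0;
}
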
internal.h
89 unsigned long size, written; \
92 size = min(handle->size, len); \
93 written = memcpy_func(handle->addr, buf, size); \
94 written = size - written; \
99 handle->size -= written; \
100 if (!handle->size) { \
106 handle->size = PAGE_SIZE << page_order(rb); \
108 } while (len && written == size); \
core.c
1259 int size = 0; in perf_event__read_size() local
1263 size += sizeof(u64); in perf_event__read_size()
1266 size += sizeof(u64); in perf_event__read_size()
1273 size += sizeof(u64); in perf_event__read_size()
1276 size += entry * nr; in perf_event__read_size()
1277 event->read_size = size; in perf_event__read_size()
1284 u16 size = 0; in perf_event__header_size() local
1289 size += sizeof(data->ip); in perf_event__header_size()
1292 size += sizeof(data->addr); in perf_event__header_size()
1295 size += sizeof(data->period); in perf_event__header_size()
[all …]
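
perf_event__read_size() adds one u64 per enabled read_format field and then multiplies the per-member entry size by the number of group members; perf_event__header_size() does the same per-sample. A hedged userspace reconstruction of the read-size accounting, using the PERF_FORMAT_* bits from the perf_event_open(2) uapi:

#include <stdint.h>
#include <stdio.h>

#define PERF_FORMAT_TOTAL_TIME_ENABLED  (1U << 0)
#define PERF_FORMAT_TOTAL_TIME_RUNNING  (1U << 1)
#define PERF_FORMAT_ID                  (1U << 2)
#define PERF_FORMAT_GROUP               (1U << 3)

/* Illustrative: bytes returned by read() on a perf event fd, following the
 * same accounting as perf_event__read_size(). */
static size_t read_size(uint64_t read_format, unsigned int nr_siblings)
{
    size_t entry = sizeof(uint64_t);    /* the counter value itself */
    size_t size = 0;
    unsigned int nr = 1;

    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        size += sizeof(uint64_t);
    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
        size += sizeof(uint64_t);
    if (read_format & PERF_FORMAT_ID)
        entry += sizeof(uint64_t);      /* each value carries its id */
    if (read_format & PERF_FORMAT_GROUP) {
        nr += nr_siblings;
        size += sizeof(uint64_t);       /* leading "nr" field */
    }
    return size + entry * nr;
}

int main(void)
{
    printf("%zu\n", read_size(PERF_FORMAT_GROUP | PERF_FORMAT_ID, 2)); /* 56 */
    return 0;
}
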
callchain.c
62 int size; in alloc_callchain_buffers() local
70 size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); in alloc_callchain_buffers()
72 entries = kzalloc(size, GFP_KERNEL); in alloc_callchain_buffers()
76 size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; in alloc_callchain_buffers()
79 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, in alloc_callchain_buffers()
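
alloc_callchain_buffers() sizes its per-CPU pointer table with offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]) instead of sizeof() plus n times the element size, which also covers any padding before the trailing array. A minimal sketch of that flexible-array sizing idiom (the struct here is illustrative; the variable-index offsetof is the same GCC/Clang form the kernel uses):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct entries {
    unsigned int generation;
    void *cpu_entries[];        /* one slot per possible CPU */
};

int main(void)
{
    unsigned int nr_cpus = 8;
    /* Header size plus nr_cpus pointer slots, padding included. */
    size_t size = offsetof(struct entries, cpu_entries[nr_cpus]);
    struct entries *e = calloc(1, size);

    printf("allocating %zu bytes for %u CPUs\n", size, nr_cpus);
    free(e);
    return 0;
}
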
/kernel/sched/
cpudeadline.c
59 if ((l < cp->size) && dl_time_before(cp->elements[idx].dl, in cpudl_heapify()
62 if ((r < cp->size) && dl_time_before(cp->elements[largest].dl, in cpudl_heapify()
155 new_cpu = cp->elements[cp->size - 1].cpu; in cpudl_set()
156 cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl; in cpudl_set()
158 cp->size--; in cpudl_set()
174 cp->size++; in cpudl_set()
175 cp->elements[cp->size - 1].dl = 0; in cpudl_set()
176 cp->elements[cp->size - 1].cpu = cpu; in cpudl_set()
177 cp->elements[cpu].idx = cp->size - 1; in cpudl_set()
178 cpudl_change_key(cp, cp->size - 1, dl); in cpudl_set()
[all …]
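
The cpudeadline hits are standard array-backed max-heap moves: heapify compares a node against children at 2*idx+1 and 2*idx+2 while they lie below cp->size, and removal copies the last element into the vacated slot before shrinking. A compact sketch of delete-by-swap plus sift-down on a plain array of keys (the kernel version additionally maintains a cpu-to-index reverse map and compares deadlines with dl_time_before()):

#include <stdio.h>

/* Sift the element at idx down until neither child is larger. */
static void heapify(unsigned long h[], int size, int idx)
{
    for (;;) {
        int l = 2 * idx + 1, r = 2 * idx + 2, largest = idx;
        unsigned long tmp;

        if (l < size && h[l] > h[largest])
            largest = l;
        if (r < size && h[r] > h[largest])
            largest = r;
        if (largest == idx)
            return;
        tmp = h[idx]; h[idx] = h[largest]; h[largest] = tmp;
        idx = largest;
    }
}

/* Delete-by-swap, as in the cpudl_set() removal path: overwrite the victim
 * with the last element, shrink, then restore the heap property. */
static int heap_remove(unsigned long h[], int size, int idx)
{
    h[idx] = h[size - 1];
    size--;
    heapify(h, size, idx);
    return size;
}

int main(void)
{
    unsigned long h[] = { 90, 50, 70, 10, 20, 60 };
    int i, size = heap_remove(h, 6, 0);    /* drop the max */

    for (i = 0; i < size; i++)
        printf("%lu ", h[i]);              /* 70 50 60 10 20 */
    printf("\n");
    return 0;
}
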
/kernel/bpf/
core.c
57 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size) in bpf_internal_load_pointer_neg_helper() argument
65 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) in bpf_internal_load_pointer_neg_helper()
71 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) in bpf_prog_alloc() argument
78 size = round_up(size, PAGE_SIZE); in bpf_prog_alloc()
79 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL); in bpf_prog_alloc()
89 fp->pages = size / PAGE_SIZE; in bpf_prog_alloc()
96 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, in bpf_prog_realloc() argument
105 size = round_up(size, PAGE_SIZE); in bpf_prog_realloc()
106 if (size <= fp_old->pages * PAGE_SIZE) in bpf_prog_realloc()
109 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL); in bpf_prog_realloc()
[all …]
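
bpf_prog_alloc() rounds the requested size up to whole pages and records the page count, and bpf_prog_realloc() skips reallocation when the new rounded size still fits in the pages already owned. A small sketch of that page-granular sizing (PAGE_SIZE is hard-coded here for illustration; the kernel uses round_up() and __vmalloc()):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Round up to a multiple of a power-of-two alignment, like round_up(). */
static unsigned long round_up_pow2(unsigned long x, unsigned long align)
{
    return (x + align - 1) & ~(align - 1);
}

struct prog {
    unsigned long pages;    /* pages backing the current allocation */
    void *mem;
};

/* Illustrative grow path: reallocate only when the rounded-up size no longer
 * fits in the pages we already have. */
static int prog_grow(struct prog *p, unsigned long new_size)
{
    unsigned long size = round_up_pow2(new_size, PAGE_SIZE);
    void *mem;

    if (size <= p->pages * PAGE_SIZE)
        return 0;        /* existing allocation is big enough */

    mem = realloc(p->mem, size);
    if (!mem)
        return -1;
    p->mem = mem;
    p->pages = size / PAGE_SIZE;
    return 0;
}

int main(void)
{
    struct prog p = { 0, NULL };

    prog_grow(&p, 100);     /* 1 page */
    prog_grow(&p, 3000);    /* still 1 page, no reallocation */
    prog_grow(&p, 5000);    /* grows to 2 pages */
    printf("pages = %lu\n", p.pages);
    free(p.mem);
    return 0;
}
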
verifier.c
539 static int check_stack_write(struct verifier_state *state, int off, int size, in check_stack_write() argument
551 if (size != 8) { in check_stack_write()
569 for (i = 0; i < size; i++) { in check_stack_write()
579 static int check_stack_read(struct verifier_state *state, int off, int size, in check_stack_read() argument
588 if (size != 8) { in check_stack_read()
605 for (i = 0; i < size; i++) { in check_stack_read()
609 off, i, size); in check_stack_read()
622 int size) in check_map_access() argument
626 if (off < 0 || off + size > map->value_size) { in check_map_access()
628 map->value_size, off, size); in check_map_access()
[all …]
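
check_map_access() is the bounds check at the heart of these verifier hits: an access of size bytes at signed offset off is legal only when off is non-negative and off + size stays within the map's value_size. A one-function sketch of that predicate (the real verifier also tracks register state before it ever gets here):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative: is the byte range [off, off + size) contained in a value of
 * value_size bytes?  Mirrors the condition in check_map_access(). */
static bool access_ok(int off, int size, unsigned int value_size)
{
    if (off < 0 || size <= 0)
        return false;
    return (unsigned int)off + (unsigned int)size <= value_size;
}

int main(void)
{
    printf("%d %d %d\n",
           access_ok(0, 8, 16),     /* 1: fits */
           access_ok(12, 8, 16),    /* 0: runs past the end */
           access_ok(-4, 8, 16));   /* 0: negative offset */
    return 0;
}
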
test_stub.c
44 int size; member
57 static bool test_is_valid_access(int off, int size, enum bpf_access_type type) in test_is_valid_access() argument
65 if (access->size == size && (access->type & type)) in test_is_valid_access()
/kernel/printk/
printk.c
379 u32 size; in msg_used_size() local
381 size = sizeof(struct printk_log) + text_len + dict_len; in msg_used_size()
382 *pad_len = (-size) & (LOG_ALIGN - 1); in msg_used_size()
383 size += *pad_len; in msg_used_size()
385 return size; in msg_used_size()
421 u32 size, pad_len; in log_store() local
425 size = msg_used_size(text_len, dict_len, &pad_len); in log_store()
427 if (log_make_free_space(size)) { in log_store()
429 size = truncate_msg(&text_len, &trunc_msg_len, in log_store()
432 if (log_make_free_space(size)) in log_store()
[all …]
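
msg_used_size() computes the padding needed to bring a record up to LOG_ALIGN with the negate-and-mask trick: for a power-of-two alignment, (-size) & (align - 1) is the distance from size up to the next multiple, and 0 when size is already aligned. A tiny demonstration of that identity (LOG_ALIGN here just stands in for the kernel's record alignment):

#include <stdio.h>

#define LOG_ALIGN 8u    /* stand-in for the printk record alignment */

int main(void)
{
    unsigned int size;

    for (size = 0; size < 12; size++) {
        /* Both expressions give "bytes needed to reach the next multiple
         * of LOG_ALIGN" for a power-of-two alignment. */
        unsigned int pad_mask = (-size) & (LOG_ALIGN - 1);
        unsigned int pad_mod  = (LOG_ALIGN - size % LOG_ALIGN) % LOG_ALIGN;

        printf("size=%2u pad=%u (mod form %u)\n", size, pad_mask, pad_mod);
    }
    return 0;
}
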
/kernel/trace/
trace_probe.c
498 bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo); in __parse_bitfield_probe_arg()
501 return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0; in __parse_bitfield_probe_arg()
505 int traceprobe_parse_probe_arg(char *arg, ssize_t *size, in traceprobe_parse_probe_arg() argument
534 parg->offset = *size; in traceprobe_parse_probe_arg()
535 *size += parg->type->size; in traceprobe_parse_probe_arg()
619 size_t size; in traceprobe_probes_write() local
626 size = count - done; in traceprobe_probes_write()
628 if (size >= WRITE_BUFSIZE) in traceprobe_probes_write()
629 size = WRITE_BUFSIZE - 1; in traceprobe_probes_write()
631 if (copy_from_user(kbuf, buffer + done, size)) { in traceprobe_probes_write()
[all …]
trace_syscalls.c
313 int size; in ftrace_syscall_enter() local
331 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; in ftrace_syscall_enter()
338 sys_data->enter_event->event.type, size, irq_flags, pc); in ftrace_syscall_enter()
564 int size; in perf_syscall_enter() local
581 size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); in perf_syscall_enter()
582 size = ALIGN(size + sizeof(u32), sizeof(u64)); in perf_syscall_enter()
583 size -= sizeof(u32); in perf_syscall_enter()
585 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, in perf_syscall_enter()
593 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); in perf_syscall_enter()
638 int size; in perf_syscall_exit() local
[all …]
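
perf_syscall_enter() sizes its record with a small trick: it aligns size + sizeof(u32) up to a u64 boundary and then subtracts the u32 again, so that together with the 4-byte raw-data length perf stores in front of the record, the total ends 8-byte aligned. A short illustration of what those two lines compute (ALIGN_UP is the usual power-of-two round-up):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    uint32_t payload;

    for (payload = 20; payload <= 28; payload += 4) {
        /* Smallest size >= payload such that 4 + size is a multiple of 8. */
        uint32_t size = ALIGN_UP(payload + sizeof(uint32_t), sizeof(uint64_t))
                        - sizeof(uint32_t);

        printf("payload=%u -> size=%u, u32 header + size = %u\n",
               payload, size, (uint32_t)(sizeof(uint32_t) + size));
    }
    return 0;
}
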
trace_uprobe.c
537 ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg, in create_trace_uprobe()
779 int size, esize; in __uprobe_trace_func() local
784 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE)) in __uprobe_trace_func()
791 size = esize + tu->tp.size + dsize; in __uprobe_trace_func()
793 call->event.type, size, 0, 0); in __uprobe_trace_func()
807 memcpy(data, ucb->buf, tu->tp.size + dsize); in __uprobe_trace_func()
975 int ret, i, size; in uprobe_event_define_fields() local
982 size = SIZEOF_TRACE_ENTRY(true); in uprobe_event_define_fields()
985 size = SIZEOF_TRACE_ENTRY(false); in uprobe_event_define_fields()
992 parg->name, size + parg->offset, in uprobe_event_define_fields()
[all …]
trace_stack.c
56 int size; in print_max_stack() local
67 size = stack_dump_index[i]; in print_max_stack()
69 size = stack_dump_index[i] - stack_dump_index[i+1]; in print_max_stack()
72 size, (void *)stack_dump_trace[i]); in print_max_stack()
353 int size; in t_show() local
375 size = stack_dump_index[i]; in t_show()
377 size = stack_dump_index[i] - stack_dump_index[i+1]; in t_show()
379 seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size); in t_show()
trace_kprobe.c
757 ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg, in create_trace_kprobe()
919 int size, dsize, pc; in __kprobe_trace_func() local
932 size = sizeof(*entry) + tk->tp.size + dsize; in __kprobe_trace_func()
936 size, irq_flags, pc); in __kprobe_trace_func()
967 int size, pc, dsize; in __kretprobe_trace_func() local
980 size = sizeof(*entry) + tk->tp.size + dsize; in __kretprobe_trace_func()
984 size, irq_flags, pc); in __kretprobe_trace_func()
1102 parg->type->size, in kprobe_event_define_fields()
1126 parg->type->size, in kretprobe_event_define_fields()
1144 int size, __size, dsize; in kprobe_perf_func() local
[all …]
trace.c
464 int __trace_puts(unsigned long ip, const char *str, int size) in __trace_puts() argument
481 alloc = sizeof(*entry) + size + 2; /* possible \n added */ in __trace_puts()
493 memcpy(&entry->buf, str, size); in __trace_puts()
496 if (entry->buf[size - 1] != '\n') { in __trace_puts()
497 entry->buf[size] = '\n'; in __trace_puts()
498 entry->buf[size + 1] = '\0'; in __trace_puts()
500 entry->buf[size] = '\0'; in __trace_puts()
505 return size; in __trace_puts()
520 int size = sizeof(struct bputs_entry); in __trace_bputs() local
533 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, in __trace_bputs()
[all …]
/kernel/irq/
irqdomain.c
39 struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, in __irq_domain_add() argument
46 domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), in __irq_domain_add()
57 domain->revmap_size = size; in __irq_domain_add()
124 unsigned int size, in irq_domain_add_simple() argument
131 domain = __irq_domain_add(of_node, size, size, 0, ops, host_data); in irq_domain_add_simple()
138 int rc = irq_alloc_descs(first_irq, first_irq, size, in irq_domain_add_simple()
144 irq_domain_associate_many(domain, first_irq, 0, size); in irq_domain_add_simple()
167 unsigned int size, in irq_domain_add_legacy() argument
175 domain = __irq_domain_add(of_node, first_hwirq + size, in irq_domain_add_legacy()
176 first_hwirq + size, 0, ops, host_data); in irq_domain_add_legacy()
[all …]
/kernel/gcov/
gcc_3_4.c
217 size_t size; in get_fn_size() local
219 size = sizeof(struct gcov_fn_info) + num_counter_active(info) * in get_fn_size()
222 size = ALIGN(size, __alignof__(struct gcov_fn_info)); in get_fn_size()
223 return size; in get_fn_size()
267 size_t size = ctr->num * sizeof(gcov_type); in gcov_info_dup() local
271 dup->counts[i].values = vmalloc(size); in gcov_info_dup()
274 memcpy(dup->counts[i].values, ctr->values, size); in gcov_info_dup()
gcc_4_7.c
365 size_t size; member
488 iter->size = convert_to_gcda(NULL, info); in gcov_iter_new()
489 iter->buffer = vmalloc(iter->size); in gcov_iter_new()
539 if (iter->pos < iter->size) in gcov_iter_next()
542 if (iter->pos >= iter->size) in gcov_iter_next()
559 if (iter->pos >= iter->size) in gcov_iter_write()
563 if (iter->pos + len > iter->size) in gcov_iter_write()
564 len = iter->size - iter->pos; in gcov_iter_write()
/kernel/power/
snapshot.c
203 static void *chain_alloc(struct chain_allocator *ca, unsigned int size) in chain_alloc() argument
207 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) { in chain_alloc()
219 ca->used_space += size; in chain_alloc()
1521 unsigned long size; in minimum_image_size() local
1523 size = global_page_state(NR_SLAB_RECLAIMABLE) in minimum_image_size()
1530 return saveable <= size ? 0 : saveable - size; in minimum_image_size()
1558 unsigned long saveable, size, max_size, count, highmem, pages = 0; in hibernate_preallocate_memory() local
1588 size = 0; in hibernate_preallocate_memory()
1590 size += snapshot_additional_pages(zone); in hibernate_preallocate_memory()
1601 size += page_key_additional_pages(saveable); in hibernate_preallocate_memory()
[all …]
user.c
204 loff_t size; in snapshot_ioctl() local
300 size = snapshot_get_image_size(); in snapshot_ioctl()
301 size <<= PAGE_SHIFT; in snapshot_ioctl()
302 error = put_user(size, (loff_t __user *)arg); in snapshot_ioctl()
306 size = count_swap_pages(data->swap, 1); in snapshot_ioctl()
307 size <<= PAGE_SHIFT; in snapshot_ioctl()
308 error = put_user(size, (loff_t __user *)arg); in snapshot_ioctl()
/kernel/debug/
gdbstub.c
327 int size = 0; in kgdb_ebin2mem() local
331 c[size] = *buf++; in kgdb_ebin2mem()
332 if (c[size] == 0x7d) in kgdb_ebin2mem()
333 c[size] = *buf++ ^ 0x20; in kgdb_ebin2mem()
334 size++; in kgdb_ebin2mem()
337 return probe_kernel_write(mem, c, size); in kgdb_ebin2mem()
349 idx += dbg_reg_def[i].size; in pt_regs_to_gdb_regs()
361 idx += dbg_reg_def[i].size; in gdb_regs_to_pt_regs()
592 offset += dbg_reg_def[i].size; in gdb_hex_reg_helper()
594 dbg_reg_def[i].size); in gdb_hex_reg_helper()
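
kgdb_ebin2mem() undoes the GDB remote protocol's binary escaping before writing to memory: 0x7d ('}') marks an escape and the following byte is transmitted XORed with 0x20. A standalone sketch of that decode step (the kernel version hands the decoded bytes to probe_kernel_write()):

#include <stdio.h>

/* Illustrative decoder for GDB remote-protocol escaped binary data:
 * 0x7d escapes the next byte, which is sent XORed with 0x20. */
static size_t ebin_decode(const unsigned char *in, size_t in_len,
                          unsigned char *out, size_t out_max)
{
    size_t i, n = 0;

    for (i = 0; i < in_len && n < out_max; i++, n++) {
        unsigned char c = in[i];

        if (c == 0x7d && i + 1 < in_len)
            c = in[++i] ^ 0x20;
        out[n] = c;
    }
    return n;
}

int main(void)
{
    /* "}\x5d" decodes to 0x7d itself, "}\x03" decodes to 0x23 ('#'). */
    const unsigned char in[] = { 'A', 0x7d, 0x5d, 0x7d, 0x03, 'B' };
    unsigned char out[8];
    size_t i, n = ebin_decode(in, sizeof(in), out, sizeof(out));

    for (i = 0; i < n; i++)
        printf("%02x ", out[i]);    /* 41 7d 23 42 */
    printf("\n");
    return 0;
}
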
