Usage hits for the symbol 'start' under kernel/, grouped by directory and file. Each hit is shown as "line: code   [enclosing function, symbol role]"; [all …] marks a per-file list truncated by the search tool.

/kernel/
range.c:
     12: int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)   [in add_range(), argument]
     14: if (start >= end)   [in add_range()]
     21: range[nr_range].start = start;   [in add_range()]
     30: u64 start, u64 end)   [in add_range_with_merge(), argument]
     34: if (start >= end)   [in add_range_with_merge()]
     44: common_start = max(range[i].start, start);   [in add_range_with_merge()]
     50: start = min(range[i].start, start);   [in add_range_with_merge()]
     55: range[nr_range - 1].start = 0;   [in add_range_with_merge()]
     62: return add_range(range, az, nr_range, start, end);   [in add_range_with_merge()]
     65: void subtract_range(struct range *range, int az, u64 start, u64 end)   [in subtract_range(), argument]
    [all …]
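The add_range() and add_range_with_merge() hits outline a small fixed-capacity interval list: appends are refused for empty ranges or when the az slots are exhausted, and the merge variant first coalesces the incoming range with every entry it overlaps before appending the union. A compilable userspace sketch of that logic, reconstructed from the visible fragments (the slot handling, the memmove compaction, and the half-open [start, end) reading of the `start >= end` check are assumptions):

    #include <stdint.h>
    #include <string.h>

    struct range { uint64_t start, end; };

    #define MAXU(a, b) ((a) > (b) ? (a) : (b))
    #define MINU(a, b) ((a) < (b) ? (a) : (b))

    /* Append [start, end) if it is non-empty and a slot (az = array size) is free. */
    static int add_range(struct range *range, int az, int nr_range,
                         uint64_t start, uint64_t end)
    {
        if (start >= end)
            return nr_range;
        if (nr_range >= az)                 /* out of slots */
            return nr_range;
        range[nr_range].start = start;
        range[nr_range].end = end;
        return nr_range + 1;
    }

    /* Coalesce [start, end) with every entry it overlaps, then append the union. */
    static int add_range_with_merge(struct range *range, int az, int nr_range,
                                    uint64_t start, uint64_t end)
    {
        int i;

        if (start >= end)
            return nr_range;

        for (i = 0; i < nr_range; i++) {
            uint64_t common_start = MAXU(range[i].start, start);
            uint64_t common_end = MINU(range[i].end, end);

            if (common_start > common_end)  /* no overlap, not adjacent */
                continue;

            /* Grow the incoming range to the union ... */
            start = MINU(range[i].start, start);
            end = MAXU(range[i].end, end);

            /* ... and compact entry i away before rescanning. */
            memmove(&range[i], &range[i + 1],
                    (nr_range - (i + 1)) * sizeof(range[i]));
            range[nr_range - 1].start = 0;
            range[nr_range - 1].end = 0;
            nr_range--;
            i--;
        }
        return add_range(range, az, nr_range, start, end);
    }

The `i--` rescan matters: once the incoming range has grown, it may now overlap entries that were already passed over.
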
resource.c:
     35: .start = 0,
     43: .start = 0,
    112: unsigned long long start, end;   [in r_show(), local]
    121: start = r->start;   [in r_show()]
    124: start = end = 0;   [in r_show()]
    129: width, start,   [in r_show()]
    136: .start = r_start,
    173: resource_size_t start = new->start;   [in __request_resource(), local]
    177: if (end < start)   [in __request_resource()]
    179: if (start < root->start)   [in __request_resource()]
    [all …]
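The __request_resource() hits show its guard clauses: an inverted request (end < start) or one that falls outside the root's span is bounced back immediately, and only then does the walk over the address-ordered child list look for a gap. A compilable sketch of that insert-or-report-conflict walk (the field names follow the hits; the list handling is reconstructed and simplified):

    #include <stddef.h>
    #include <stdint.h>

    struct resource {
        uint64_t start, end;                /* inclusive bounds */
        struct resource *parent, *sibling, *child;
    };

    /* Insert new under root; return NULL on success or the conflicting entry. */
    static struct resource *request_resource(struct resource *root,
                                             struct resource *new)
    {
        uint64_t start = new->start, end = new->end;
        struct resource *tmp, **p;

        if (end < start)
            return root;                    /* inverted range */
        if (start < root->start || end > root->end)
            return root;                    /* does not fit inside root */

        p = &root->child;                   /* children are sorted by start */
        for (;;) {
            tmp = *p;
            if (!tmp || tmp->start > end) { /* gap found: link new in */
                new->sibling = tmp;
                *p = new;
                new->parent = root;
                return NULL;
            }
            p = &tmp->sibling;
            if (tmp->end < start)           /* entirely before new, keep going */
                continue;
            return tmp;                     /* overlap: report the conflict */
        }
    }
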
resource_kunit.c:
     22: static struct resource r0 = { .start = R0_START, .end = R0_END };
     23: static struct resource r1 = { .start = R1_START, .end = R1_END };
     24: static struct resource r2 = { .start = R2_START, .end = R2_END };
     25: static struct resource r3 = { .start = R3_START, .end = R3_END };
     26: static struct resource r4 = { .start = R4_START, .end = R4_END };
     37: .r1 = &r1, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
     39: .r1 = &r2, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
     41: .r1 = &r3, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
     43: .r1 = &r4, .r2 = &r0, .r.start = R0_START, .r.end = R0_END, .ret = true,
     49: .r1 = &r4, .r2 = &r1, .r.start = R1_START, .r.end = R4_END, .ret = true,
    [all …]
static_call_inline.c:
     95: static inline void static_call_sort_entries(struct static_call_site *start,   [in static_call_sort_entries(), argument]
     98: sort(start, stop - start, sizeof(struct static_call_site),   [in static_call_sort_entries()]
    210: struct static_call_site *start,   [in __static_call_init(), argument]
    217: if (start == stop)   [in __static_call_init()]
    220: static_call_sort_entries(start, stop);   [in __static_call_init()]
    222: for (site = start; site < stop; site++) {   [in __static_call_init()]
    282: static int addr_conflict(struct static_call_site *site, void *start, void *end)   [in addr_conflict(), argument]
    287: addr + CALL_INSN_SIZE > (unsigned long)start)   [in addr_conflict()]
    295: void *start, void *end, bool init)   [in __static_call_text_reserved(), argument]
    301: if (addr_conflict(iter, start, end))   [in __static_call_text_reserved()]
    [all …]
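static_call_inline.c and jump_label.c below share a structure: each patch site lives in a section delimited by start/stop pointers, sorted once at init and scanned linearly, and addr_conflict() asks whether a patched call instruction overlaps a queried region. The predicate is a one-line interval test; a sketch, assuming the usual inclusive [start, end] query range against the half-open [addr, addr + insn_size) instruction, which is what the line-287 fragment suggests:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Does the insn_size-byte instruction at addr touch [start, end]? */
    static bool addr_conflict(uintptr_t addr, size_t insn_size,
                              uintptr_t start, uintptr_t end)
    {
        return addr <= end && addr + insn_size > start;
    }
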
kexec_file.c:
    285: image->control_page = crashk_res.start;   [in kimage_file_alloc_init()]
    419: static int locate_mem_hole_top_down(unsigned long start, unsigned long end,   [in locate_mem_hole_top_down(), argument]
    432: if (temp_start < start || temp_start < kbuf->buf_min)   [in locate_mem_hole_top_down()]
    457: static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,   [in locate_mem_hole_bottom_up(), argument]
    463: temp_start = max(start, kbuf->buf_min);   [in locate_mem_hole_bottom_up()]
    494: u64 start = res->start, end = res->end;   [in locate_mem_hole_callback(), local]
    495: unsigned long sz = end - start + 1;   [in locate_mem_hole_callback()]
    506: if (end < kbuf->buf_min || start > kbuf->buf_max)   [in locate_mem_hole_callback()]
    514: return locate_mem_hole_top_down(start, end, kbuf);   [in locate_mem_hole_callback()]
    515: return locate_mem_hole_bottom_up(start, end, kbuf);   [in locate_mem_hole_callback()]
    [all …]
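locate_mem_hole_callback() normalizes each candidate region to inclusive start/end (hence size = end - start + 1), rejects regions outside [buf_min, buf_max], and hands the rest to a top-down or bottom-up scan for a hole big enough for the kexec buffer. A sketch of the top-down scan under stated assumptions: inclusive bounds, a power-of-two alignment, stepping by the alignment rather than the kernel's page-at-a-time probe, and a conflicts() callback standing in for the check against already-loaded segments:

    #include <stdbool.h>
    #include <stdint.h>

    #define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

    /* Find the highest aligned size-byte window in [start, end] that does
     * not conflict; on success store its base in *out. */
    static bool locate_hole_top_down(uint64_t start, uint64_t end,
                                     uint64_t size, uint64_t align,
                                     bool (*conflicts)(uint64_t s, uint64_t e),
                                     uint64_t *out)
    {
        uint64_t s;

        if (end < start || end - start + 1 < size)
            return false;

        s = ALIGN_DOWN(end - size + 1, align);
        while (s >= start) {
            if (!conflicts(s, s + size - 1)) {
                *out = s;
                return true;
            }
            if (s - start < align)      /* next step would leave [start, end] */
                break;
            s -= align;
        }
        return false;
    }
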
jump_label.c:
     80: jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)   [in jump_label_sort_entries(), argument]
     88: size = (((unsigned long)stop - (unsigned long)start)   [in jump_label_sort_entries()]
     90: sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);   [in jump_label_sort_entries()]
    309: static int addr_conflict(struct jump_entry *entry, void *start, void *end)   [in addr_conflict(), argument]
    312: jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)   [in addr_conflict()]
    319: struct jump_entry *iter_stop, void *start, void *end, bool init)   [in __jump_label_text_reserved(), argument]
    326: if (addr_conflict(iter, start, end))   [in __jump_label_text_reserved()]
    546: static int __jump_label_mod_text_reserved(void *start, void *end)   [in __jump_label_mod_text_reserved(), argument]
    552: mod = __module_text_address((unsigned long)start);   [in __jump_label_mod_text_reserved()]
    563: start, end, mod->state == MODULE_STATE_COMING);   [in __jump_label_mod_text_reserved()]
    [all …]
kexec_core.c:
     61: .start = 0,
     68: .start = 0,
    246: if ((mstart < phys_to_boot_phys(crashk_res.start)) ||   [in sanity_check_segment_list()]
    283: unsigned long start,   [in kimage_is_destination_range(), argument]
    293: if ((end > mstart) && (start < mend))   [in kimage_is_destination_range()]
   1014: if (crashk_res.end != crashk_res.start)   [in crash_get_memory_size()]
   1024: unsigned long start, end;   [in crash_shrink_memory(), local]
   1035: start = crashk_res.start;   [in crash_shrink_memory()]
   1037: old_size = (end == 0) ? 0 : end - start + 1;   [in crash_shrink_memory()]
   1050: end = start + new_size;   [in crash_shrink_memory()]
    [all …]
kprobes.c:
   2484: int kprobe_add_area_blacklist(unsigned long start, unsigned long end)   [in kprobe_add_area_blacklist(), argument]
   2489: for (entry = start; entry < end; entry += ret) {   [in kprobe_add_area_blacklist()]
   2500: static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)   [in kprobe_remove_area_blacklist(), argument]
   2505: if (ent->start_addr < start || ent->start_addr >= end)   [in kprobe_remove_area_blacklist()]
   2552: static int __init populate_kprobe_blacklist(unsigned long *start,   [in populate_kprobe_blacklist(), argument]
   2559: for (iter = start; iter < end; iter++) {   [in populate_kprobe_blacklist()]
   2583: unsigned long start, end;   [in add_module_kprobe_blacklist(), local]
   2591: start = (unsigned long)mod->kprobes_text_start;   [in add_module_kprobe_blacklist()]
   2592: if (start) {   [in add_module_kprobe_blacklist()]
   2593: end = start + mod->kprobes_text_size;   [in add_module_kprobe_blacklist()]
    [all …]
kallsyms.c:
    211: unsigned int *start,   [in kallsyms_lookup_names(), argument]
    248: *start = low;   [in kallsyms_lookup_names()]
    310: unsigned int i, start, end;   [in kallsyms_on_each_match_symbol(), local]
    312: ret = kallsyms_lookup_names(name, &start, &end);   [in kallsyms_on_each_match_symbol()]
    316: for (i = start; !ret && i <= end; i++) {   [in kallsyms_on_each_match_symbol()]
    865: .start = s_start,
    908: .start = s_start,
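kallsyms_lookup_names() returns an inclusive index range [*start, *end] covering every table entry matching the requested name, and kallsyms_on_each_match_symbol() then walks it with `for (i = start; i <= end; i++)`. A sketch of that lookup over a plain sorted string array; the kernel bisects its compressed name table instead, so everything here beyond the bisection-then-walk shape is an assumption:

    #include <stdbool.h>
    #include <string.h>

    /* Find the inclusive index range [*start, *end] of entries equal to
     * name in a sorted string array; false if nothing matches. */
    static bool lookup_names(const char *const *names, unsigned int n,
                             const char *name,
                             unsigned int *start, unsigned int *end)
    {
        unsigned int low = 0, high = n;

        /* Lower bound: first index with names[i] >= name. */
        while (low < high) {
            unsigned int mid = low + (high - low) / 2;

            if (strcmp(names[mid], name) < 0)
                low = mid + 1;
            else
                high = mid;
        }
        if (low == n || strcmp(names[low], name) != 0)
            return false;
        *start = low;

        /* Walk forward over the run of equal names. */
        while (low + 1 < n && strcmp(names[low + 1], name) == 0)
            low++;
        *end = low;
        return true;
    }
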
/kernel/dma/
swiotlb.c:
    222: pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,   [in swiotlb_print_info()]
    247: phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;   [in swiotlb_mem_remap()]
    278: vaddr = phys_to_virt(mem->start);   [in swiotlb_update_mem_attributes()]
    287: static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,   [in swiotlb_init_io_tlb_mem(), argument]
    291: void *vaddr = phys_to_virt(start);   [in swiotlb_init_io_tlb_mem()]
    295: mem->start = start;   [in swiotlb_init_io_tlb_mem()]
    296: mem->end = mem->start + bytes;   [in swiotlb_init_io_tlb_mem()]
    519: tbl_vaddr = (unsigned long)phys_to_virt(mem->start);   [in swiotlb_exit()]
    520: tbl_size = PAGE_ALIGN(mem->end - mem->start);   [in swiotlb_exit()]
    533: memblock_free_late(mem->start, tbl_size);   [in swiotlb_exit()]
    [all …]
/kernel/bpf/
trampoline.c:
    121: ksym->start = (unsigned long) data;   [in bpf_image_ksym_add()]
    122: ksym->end = ksym->start + PAGE_SIZE;   [in bpf_image_ksym_add()]
    124: perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,   [in bpf_image_ksym_add()]
    131: perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,   [in bpf_image_ksym_del()]
    857: u64 start = NO_START_TIME;   [in bpf_prog_start_time(), local]
    860: start = sched_clock();   [in bpf_prog_start_time()]
    861: if (unlikely(!start))   [in bpf_prog_start_time()]
    862: start = NO_START_TIME;   [in bpf_prog_start_time()]
    864: return start;   [in bpf_prog_start_time()]
    896: u64 start)   [in update_prog_stats(), argument]
    [all …]
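bpf_prog_start_time() reads sched_clock() only when stats collection is on, and maps a raw 0 to the reserved NO_START_TIME sentinel, so that update_prog_stats() can tell "no measurement" apart from a real timestamp with a single comparison. A sketch of the sentinel pattern; the stand-in clock and the sentinel value of 1 are assumptions here, not taken from the hits:

    #include <stdbool.h>
    #include <stdint.h>

    #define NO_START_TIME 1     /* sentinel: stats off or clock unusable */

    static bool stats_enabled;

    static uint64_t sched_clock_ns(void)    /* stand-in for sched_clock() */
    {
        static uint64_t t;
        return t += 1000;                   /* fake monotonic clock */
    }

    static uint64_t prog_start_time(void)
    {
        uint64_t start = NO_START_TIME;

        if (stats_enabled) {
            start = sched_clock_ns();
            if (start == 0)                 /* 0 would be ambiguous later */
                start = NO_START_TIME;
        }
        return start;
    }

    static void update_prog_stats(uint64_t *cnt, uint64_t *nsecs, uint64_t start)
    {
        /* Only account runs that captured a real timestamp. */
        if (start > NO_START_TIME) {
            *cnt += 1;
            *nsecs += sched_clock_ns() - start;
        }
    }
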
cgroup_iter.c:
    154: .start = cgroup_iter_seq_start,
    165: struct cgroup *cgrp = aux->cgroup.start;   [in BTF_ID_LIST_SINGLE()]
    222: aux->cgroup.start = cgrp;   [in bpf_iter_attach_cgroup()]
    229: cgroup_put(aux->cgroup.start);   [in bpf_iter_detach_cgroup()]
    248: cgroup_path_ns(aux->cgroup.start, buf, PATH_MAX,   [in bpf_iter_cgroup_show_fdinfo()]
    268: info->iter.cgroup.cgroup_id = cgroup_id(aux->cgroup.start);   [in bpf_iter_cgroup_fill_link_info()]
/kernel/irq/
irqdesc.c:
    470: static int alloc_descs(unsigned int start, unsigned int cnt, int node,   [in alloc_descs(), argument]
    499: desc = alloc_desc(start + i, node, flags, mask, owner);   [in alloc_descs()]
    502: irq_insert_desc(start + i, desc);   [in alloc_descs()]
    503: irq_sysfs_add(start + i, desc);   [in alloc_descs()]
    504: irq_add_debugfs_entry(start + i, desc);   [in alloc_descs()]
    506: bitmap_set(allocated_irqs, start, cnt);   [in alloc_descs()]
    507: return start;   [in alloc_descs()]
    511: free_desc(start + i);   [in alloc_descs()]
    602: static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,   [in alloc_descs(), argument]
    609: struct irq_desc *desc = irq_to_desc(start + i);   [in alloc_descs()]
    [all …]
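alloc_descs() is an all-or-nothing batch allocator: it creates one descriptor per IRQ in [start, start + cnt), registers each, and only marks the whole range in the allocated_irqs bitmap after the last one succeeds; on failure it frees everything built so far. A sketch of that commit-or-roll-back loop, with a generic resource standing in for struct irq_desc:

    #include <errno.h>
    #include <stdlib.h>

    struct desc { unsigned int irq; /* ... */ };

    static struct desc *table[1024];    /* stand-in for the irq descriptor store */

    static struct desc *alloc_one(unsigned int irq)
    {
        struct desc *d = malloc(sizeof(*d));

        if (d)
            d->irq = irq;
        return d;
    }

    /* Allocate descriptors for irqs [start, start + cnt); all or nothing. */
    static int alloc_descs(unsigned int start, unsigned int cnt)
    {
        unsigned int i;

        for (i = 0; i < cnt; i++) {
            struct desc *d = alloc_one(start + i);

            if (!d)
                goto err;
            table[start + i] = d;       /* kernel: irq_insert_desc() etc. */
        }
        /* Publish the whole range only after every step succeeded
         * (the kernel marks it with bitmap_set(allocated_irqs, ...)). */
        return (int)start;

    err:
        while (i--) {                   /* roll back what was built */
            free(table[start + i]);
            table[start + i] = NULL;
        }
        return -ENOMEM;
    }
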
timings.c:
    384: int index, i, period_max, count, start, min = INT_MAX;   [in __irq_timings_next_event(), local]
    412: start = irqs->count < IRQ_TIMINGS_SIZE ?   [in __irq_timings_next_event()]
    422: int index = (start + i) & IRQ_TIMINGS_MASK;   [in __irq_timings_next_event()]
    716: int index, start, i, count, period_max;   [in irq_timings_test_next_index(), local]
    735: start = count < IRQ_TIMINGS_SIZE ? 0 :   [in irq_timings_test_next_index()]
    741: int index = (start + i) & IRQ_TIMINGS_MASK;   [in irq_timings_test_next_index()]
    839: int start = count >= IRQ_TIMINGS_SIZE ? count - IRQ_TIMINGS_SIZE : 0;   [in irq_timings_test_irqts(), local]
    857: ots += start;   [in irq_timings_test_irqts()]
    858: oirq += start;   [in irq_timings_test_irqts()]
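The timings code treats its sample array as a power-of-two ring: the running count only ever grows, the oldest valid sample sits at index 0 until the ring fills and at `count & mask` afterwards, and `(start + i) & mask` walks the samples oldest to newest. A small self-contained version of that indexing scheme:

    #include <stdio.h>

    #define TIMINGS_SIZE 32             /* must be a power of two */
    #define TIMINGS_MASK (TIMINGS_SIZE - 1)

    struct ring {
        unsigned long long values[TIMINGS_SIZE];
        unsigned int count;             /* monotonically increasing push counter */
    };

    static void ring_push(struct ring *r, unsigned long long v)
    {
        r->values[r->count & TIMINGS_MASK] = v;
        r->count++;
    }

    /* Visit entries oldest-first, as __irq_timings_next_event() does. */
    static void ring_walk(const struct ring *r)
    {
        unsigned int n = r->count < TIMINGS_SIZE ? r->count : TIMINGS_SIZE;
        unsigned int start = r->count < TIMINGS_SIZE ? 0
                                                     : r->count & TIMINGS_MASK;
        unsigned int i;

        for (i = 0; i < n; i++)
            printf("%llu\n", r->values[(start + i) & TIMINGS_MASK]);
    }

Once the ring has wrapped, `count & mask` is exactly the slot the next push will overwrite, which is why it is also the oldest surviving sample.
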
/kernel/trace/
trace_printk.c:
     53: void hold_module_trace_bprintk_format(const char **start, const char **end)   [in hold_module_trace_bprintk_format(), argument]
     59: if (start != end)   [in hold_module_trace_bprintk_format()]
     63: for (iter = start; iter < end; iter++) {   [in hold_module_trace_bprintk_format()]
     93: const char **start = mod->trace_bprintk_fmt_start;   [in module_trace_bprintk_format_notify(), local]
     94: const char **end = start + mod->num_trace_bprintk_fmt;   [in module_trace_bprintk_format_notify()]
     97: hold_module_trace_bprintk_format(start, end);   [in module_trace_bprintk_format_notify()]
    354: .start = t_start,
trace_mmiotrace.c:
     64: resource_size_t start, end;   [in mmio_print_pcidev(), local]
     71: start = dev->resource[i].start;   [in mmio_print_pcidev()]
     73: (unsigned long long)(start |   [in mmio_print_pcidev()]
     77: start = dev->resource[i].start;   [in mmio_print_pcidev()]
     80: dev->resource[i].start < dev->resource[i].end ?   [in mmio_print_pcidev()]
     81: (unsigned long long)(end - start) + 1 : 0);   [in mmio_print_pcidev()]
    279: .start = mmio_trace_start,
trace_stack.c:
    157: unsigned long this_size, flags; unsigned long *p, *top, *start;   [in check_stack(), local]
    212: start = stack;   [in check_stack()]
    214: (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);   [in check_stack()]
    227: p = start;   [in check_stack()]
    240: start = p + 1;   [in check_stack()]
    470: .start = t_start,
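Line 214 recovers the top of the current stack from any pointer into it: mask the address down to its THREAD_SIZE-aligned base, then add THREAD_SIZE. This only works because THREAD_SIZE is a power of two and stacks are allocated at THREAD_SIZE-aligned bases; a sketch with an illustrative size:

    #include <assert.h>
    #include <stdint.h>

    #define THREAD_SIZE (16 * 1024UL)   /* must be a power of two */

    /* Top (one past the end) of the THREAD_SIZE block containing addr. */
    static uintptr_t stack_top(uintptr_t addr)
    {
        return (addr & ~((uintptr_t)THREAD_SIZE - 1)) + THREAD_SIZE;
    }

    int main(void)
    {
        uintptr_t sp = 0x7f0000004321;

        /* Base is sp rounded down to 0x7f0000004000; top is base + 0x4000. */
        assert(stack_top(sp) == 0x7f0000008000);
        return 0;
    }
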
trace_benchmark.c:
     39: u64 start;   [in trace_do_benchmark(), local]
     53: start = trace_clock_local();   [in trace_do_benchmark()]
     60: delta = stop - start;   [in trace_do_benchmark()]
fgraph.c:
    384: int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;   [in alloc_retstack_tasklist(), local]
    393: start = 0;   [in alloc_retstack_tasklist()]
    402: if (start == end) {   [in alloc_retstack_tasklist()]
    413: t->ret_stack = ret_stack_list[start++];   [in alloc_retstack_tasklist()]
    420: for (i = start; i < end; i++)   [in alloc_retstack_tasklist()]
preemptirq_delay_test.c:
     41: u64 start, end;   [in busy_wait(), local]
     43: start = trace_clock_local();   [in busy_wait()]
     49: } while ((end - start) < (time * 1000));   [in busy_wait()]
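busy_wait() here, like trace_do_benchmark() above, brackets work between two trace_clock_local() reads; the delay test spins until the delta reaches `time` microseconds (the `* 1000` converts to the clock's nanoseconds). A userspace equivalent using CLOCK_MONOTONIC:

    #include <stdint.h>
    #include <time.h>

    static uint64_t clock_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* Spin until usecs microseconds have elapsed on the monotonic clock. */
    static void busy_wait(uint64_t usecs)
    {
        uint64_t start = clock_ns(), end;

        do {
            end = clock_ns();
        } while (end - start < usecs * 1000);
    }
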
/kernel/module/
tree_lookup.c:
     46: unsigned long start, end;   [in mod_tree_comp(), local]
     48: start = __mod_tree_val(n);   [in mod_tree_comp()]
     49: if (val < start)   [in mod_tree_comp()]
     52: end = start + __mod_tree_size(n);   [in mod_tree_comp()]
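mod_tree_comp() is the lookup comparator for the module address tree: it reports whether an address lies below, inside, or above a node's [start, start + size) span, so a standard binary tree descent lands on the node that covers the address. The shape, extracted into a self-contained helper (the struct and names here are illustrative):

    /* Three-way compare of an address against a node covering
     * [start, start + size): <0 go left, 0 hit, >0 go right. */
    struct span { unsigned long start, size; };

    static int span_comp(unsigned long val, const struct span *n)
    {
        unsigned long start = n->start;

        if (val < start)
            return -1;
        if (val >= start + n->size)
            return 1;
        return 0;
    }
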
strict_rwx.c:
     36: int (*set_memory)(unsigned long start, int num_pages))   [in frob_text(), argument]
     43: int (*set_memory)(unsigned long start, int num_pages))   [in frob_rodata(), argument]
     50: int (*set_memory)(unsigned long start, int num_pages))   [in frob_ro_after_init(), argument]
     57: int (*set_memory)(unsigned long start, int num_pages))   [in frob_writable_data(), argument]
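All four frob_* helpers take the page-permission operation as a function pointer of type `int (*)(unsigned long start, int num_pages)`, so one piece of section-to-page-range logic can be reused with set_memory_ro(), set_memory_nx(), set_memory_rw(), and friends. A sketch of that callback pattern with stand-in operations (the region type and page math here are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct layout { unsigned long base, size; };    /* assumed page-aligned */

    /* Apply a page-permission operation to a whole region. */
    static int frob_region(const struct layout *l,
                           int (*set_memory)(unsigned long start, int num_pages))
    {
        return set_memory(l->base, (int)(l->size / PAGE_SIZE));
    }

    /* Stand-ins for arch set_memory_ro()/set_memory_rw(). */
    static int set_ro(unsigned long start, int num_pages)
    {
        printf("ro: %#lx, %d pages\n", start, num_pages);
        return 0;
    }

    static int set_rw(unsigned long start, int num_pages)
    {
        printf("rw: %#lx, %d pages\n", start, num_pages);
        return 0;
    }

    int main(void)
    {
        struct layout text = { 0x400000, 8 * PAGE_SIZE };

        frob_region(&text, set_ro);     /* protect */
        frob_region(&text, set_rw);     /* unprotect, e.g. for patching */
        return 0;
    }
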
/kernel/power/
swap.c:
    127: unsigned long start;   [member]
    143: if (swap_offset < ext->start) {   [in swsusp_extents_insert()]
    145: if (swap_offset == ext->start - 1) {   [in swsusp_extents_insert()]
    146: ext->start--;   [in swsusp_extents_insert()]
    167: ext->start = swap_offset;   [in swsusp_extents_insert()]
    210: for (offset = ext->start; offset <= ext->end; offset++)   [in free_all_swap_pages()]
    555: ktime_t start;   [in save_image(), local]
    566: start = ktime_get();   [in save_image()]
    587: swsusp_show_speed(start, stop, nr_to_write, "Wrote");   [in save_image()]
    694: ktime_t start;   [in save_image_lzo(), local]
    [all …]
process.c:
     38: ktime_t start, end, elapsed;   [in try_to_freeze_tasks(), local]
     45: start = ktime_get_boottime();   [in try_to_freeze_tasks()]
     87: elapsed = ktime_sub(end, start);   [in try_to_freeze_tasks()]
/kernel/locking/
qspinlock_stat.h:
    110: u64 start = sched_clock();   [in __pv_kick(), local]
    112: per_cpu(pv_kick_time, cpu) = start;   [in __pv_kick()]
    114: this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);   [in __pv_kick()]