/kernel/time/
timekeeping_internal.h
    19  static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)   in clocksource_delta() argument
    21  u64 ret = (now - last) & mask;                                     in clocksource_delta()
    30  static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)   in clocksource_delta() argument
    32  return (now - last) & mask;                                        in clocksource_delta()
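
The hits above are the masked-subtraction idiom for a hardware counter that wraps: unsigned subtraction is already modulo 2^64, and the mask trims the result to the counter's real width, so the delta comes out right even when now has wrapped past last. A minimal standalone sketch of the same arithmetic (userspace C, names mine, not the kernel source):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe delta for a counter that is only mask+1 counts wide.
     * Unsigned subtraction wraps modulo 2^64; masking trims the result
     * back to the counter's width, so now < last still works. */
    static uint64_t delta(uint64_t now, uint64_t last, uint64_t mask)
    {
            return (now - last) & mask;
    }

    int main(void)
    {
            uint64_t mask = 0xffffff;   /* pretend 24-bit counter */

            /* counter wrapped: last was near the top, now is past zero */
            assert(delta(0x000010, 0xfffff0, mask) == 0x20);
            printf("delta across wrap: 0x%llx\n",
                   (unsigned long long)delta(0x000010, 0xfffff0, mask));
            return 0;
    }

timekeeping_get_delta() further down is the caller of this helper, feeding it the current cycle reading, tkr->cycle_last, and the clocksource mask.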

Kconfig
    20  # Clocksources require validation of the clocksource against the last

timekeeping.c
    243  u64 now, last, mask, max, delta;               in timekeeping_get_delta() local
    256  last = tkr->cycle_last;                        in timekeeping_get_delta()
    261  delta = clocksource_delta(now, last, mask);    in timekeeping_get_delta()

/kernel/
user_namespace.c
    226  u32 first, last, id2;                          in cmp_map_id() local
    238  last = first + el->count - 1;                  in cmp_map_id()
    240  if (key->id >= first && key->id <= last &&     in cmp_map_id()
    241      (id2 >= first && id2 <= last))             in cmp_map_id()
    276  u32 first, last, id2;                          in map_id_range_down_base() local
    283  last = first + map->extent[idx].count - 1;     in map_id_range_down_base()
    284  if (id >= first && id <= last &&               in map_id_range_down_base()
    285      (id2 >= first && id2 <= last))             in map_id_range_down_base()
    325  u32 first, last;                               in map_id_up_base() local
    330  last = first + map->extent[idx].count - 1;     in map_id_up_base()
    [all …]
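
All three functions here compute the inclusive upper bound of a UID/GID extent (last = first + count - 1) and then test that a requested range lies entirely inside it. A self-contained sketch of that containment test (hypothetical struct, overflow checks omitted):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical extent mapping 'count' IDs starting at 'first';
     * mirrors the shape of the hits above, not the kernel struct. */
    struct extent { uint32_t first; uint32_t count; };

    /* True if the whole requested range [id, id + count - 1] lies
     * inside the extent: the containment test the hits show.
     * Overflow of id + count - 1 is not handled here. */
    static bool range_in_extent(const struct extent *e,
                                uint32_t id, uint32_t count)
    {
            uint32_t last = e->first + e->count - 1;
            uint32_t id2  = id + count - 1;

            return id >= e->first && id <= last &&
                   id2 >= e->first && id2 <= last;
    }

    int main(void)
    {
            struct extent e = { .first = 1000, .count = 100 }; /* 1000..1099 */

            printf("%d\n", range_in_extent(&e, 1000, 100)); /* 1: exact fit */
            printf("%d\n", range_in_extent(&e, 1050, 100)); /* 0: past 1099 */
            return 0;
    }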

cpu.c
    76   struct hlist_node *last;                       member
    470  st->last = NULL;                               in cpuhp_set_state()
    488  if (!st->last) {                               in cpuhp_reset_state()
    720  st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);  in cpuhp_thread_fun()
    728  st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);  in cpuhp_thread_fun()
    773  st->last = NULL;                               in cpuhp_invoke_ap_callback()
    785  if ((ret = st->result) && st->last) {          in cpuhp_invoke_ap_callback()
    796  st->node = st->last = NULL;                    in cpuhp_invoke_ap_callback()

fork.c
    484  struct vm_area_struct *mpnt, *tmp, *prev, **pprev, *last = NULL;  in dup_mmap() local
    611  last = mpnt;                                   in dup_mmap()
    636  for (; last; last = last->vm_prev) {           in dup_mmap()
    637  if (last->vm_flags & VM_DONTCOPY)              in dup_mmap()
    639  if (!(last->vm_flags & VM_WIPEONFORK))         in dup_mmap()
    640  vm_write_end(last);                            in dup_mmap()

kprobes.c
    1963  struct kretprobe_instance *ri = NULL, *last = NULL;  in __kretprobe_trampoline_handler() local
    2019  last = ri;                                    in __kretprobe_trampoline_handler()
    2039  if (ri == last)                               in __kretprobe_trampoline_handler()

/kernel/bpf/
bpf_lru_list.c
    175  struct list_head *cur, *last, *next = inactive;  in __bpf_lru_list_rotate_inactive() local
    182  last = l->next_inactive_rotation->next;        in __bpf_lru_list_rotate_inactive()
    183  if (last == inactive)                          in __bpf_lru_list_rotate_inactive()
    184  last = last->next;                             in __bpf_lru_list_rotate_inactive()
    197  if (cur == last)                               in __bpf_lru_list_rotate_inactive()
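
The detail worth noting is lines 183-184: next_inactive_rotation is a cursor into a circular list with a sentinel head, so a saved stop marker can land on the sentinel and must be bumped one node further. A loose userspace analogy with a minimal sentinel-headed circular list (not the BPF LRU logic itself):

    #include <stdio.h>

    struct node { struct node *prev, *next; int val; };

    /* Minimal sentinel-headed circular list, kernel-list style. */
    static void list_init(struct node *head)
    {
            head->prev = head->next = head;
    }

    static void list_add_tail(struct node *n, struct node *head)
    {
            n->prev = head->prev;
            n->next = head;
            head->prev->next = n;
            head->prev = n;
    }

    int main(void)
    {
            struct node head, a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };
            struct node *cur, *last;

            list_init(&head);
            list_add_tail(&a, &head);
            list_add_tail(&b, &head);
            list_add_tail(&c, &head);

            /* A saved stop marker may be the sentinel itself; bump it
             * one node further, as lines 183-184 above do. */
            last = c.next;                  /* == &head, the sentinel */
            if (last == &head)
                    last = last->next;      /* now &a */

            for (cur = &b; cur != last; cur = cur->next) {
                    if (cur == &head)       /* never visit the sentinel */
                            continue;
                    printf("visit %d\n", cur->val);
            }
            return 0;
    }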

/kernel/sched/
topology.c
    934   struct sched_group *first = NULL, *last = NULL, *sg;  in build_overlap_sched_groups() local
    975   if (last)                                     in build_overlap_sched_groups()
    976   last->next = sg;                              in build_overlap_sched_groups()
    977   last = sg;                                    in build_overlap_sched_groups()
    978   last->next = first;                           in build_overlap_sched_groups()
    1109  struct sched_group *first = NULL, *last = NULL;  in build_sched_groups() local
    1132  if (last)                                     in build_sched_groups()
    1133  last->next = sg;                              in build_sched_groups()
    1134  last = sg;                                    in build_sched_groups()
    1136  last->next = first;                           in build_sched_groups()
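
Both builders use the same first/last idiom: append each new group at the tail via last, then close the ring by pointing the final group back at first, so iteration can start anywhere. A standalone sketch of the shape (illustrative struct, error handling and the sched_group contents omitted):

    #include <stdio.h>
    #include <stdlib.h>

    struct group { int id; struct group *next; };

    int main(void)
    {
            struct group *first = NULL, *last = NULL, *g;
            int i;

            for (i = 0; i < 3; i++) {
                    g = calloc(1, sizeof(*g));
                    g->id = i;
                    if (!first)
                            first = g;
                    if (last)
                            last->next = g;
                    last = g;
            }
            if (last)
                    last->next = first;     /* close the circle */

            g = first;                      /* one full lap: 0 1 2 */
            do {
                    printf("%d ", g->id);
                    g = g->next;
            } while (g != first);
            printf("\n");
            return 0;
    }

The do/while lap at the end is how such a ring is normally traversed, since there is no NULL terminator to stop on.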

debug.c
    562  struct sched_entity *last;                     in print_cfs_rq() local
    578  last = __pick_last_entity(cfs_rq);             in print_cfs_rq()
    579  if (last)                                      in print_cfs_rq()
    580  max_vruntime = last->vruntime;                 in print_cfs_rq()

fair.c
    641   struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);  in __pick_last_entity() local
    643   if (!last)                                    in __pick_last_entity()
    646   return rb_entry(last, struct sched_entity, run_node);  in __pick_last_entity()
    4480  if (cfs_rq->last != se)                       in __clear_buddies_last()
    4483  cfs_rq->last = NULL;                          in __clear_buddies_last()
    4511  if (cfs_rq->last == se)                       in clear_buddies()
    4707  } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) {  in pick_next_entity()
    4711  se = cfs_rq->last;                            in pick_next_entity()
    7232  cfs_rq_of(se)->last = se;                     in set_last_buddy()
    7798  &p->se == cfs_rq_of(&p->se)->last))           in task_hot()

sched.h
    551  struct sched_entity *last;                     member

/kernel/locking/
test-ww_mutex.c
    278  unsigned int n, last = nthreads - 1;           in __test_cycle() local
    289  if (n == last)                                 in __test_cycle()
    295  cycle->a_signal = &cycles[last].b_signal;      in __test_cycle()
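
__test_cycle() builds a deliberate deadlock cycle: each worker waits on the next one's signal, and line 295 wires the last worker back to the first. A sketch of just that ring wiring (hypothetical struct, no locking):

    #include <stdio.h>

    #define NTHREADS 4

    /* Hypothetical per-worker slot: each waits on the next one's
     * signal, and the last slot wraps around to the first, closing
     * the waiting loop on purpose. */
    struct cycle { int id; struct cycle *waits_on; };

    int main(void)
    {
            struct cycle cycles[NTHREADS];
            unsigned int n, last = NTHREADS - 1;

            for (n = 0; n < NTHREADS; n++) {
                    cycles[n].id = n;
                    cycles[n].waits_on = &cycles[n == last ? 0 : n + 1];
            }

            for (n = 0; n < NTHREADS; n++)
                    printf("worker %d waits on worker %d\n",
                           cycles[n].id, cycles[n].waits_on->id);
            return 0;
    }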

/kernel/power/
swap.c
    975  struct swap_map_page_list *tmp, *last;         in get_swap_reader() local
    984  last = handle->maps = NULL;                    in get_swap_reader()
    994  if (last)                                      in get_swap_reader()
    995  last->next = tmp;                              in get_swap_reader()
    996  last = tmp;                                    in get_swap_reader()

Kconfig
    229  This enables code to save the last PM event point across
    247  This enables some cheesy code to save the last PM event point in the

/kernel/events/
ring_buffer.c
    708  int last, order;                               in rb_alloc_aux() local
    715  for (last = rb->aux_nr_pages + (1 << page_private(page));  in rb_alloc_aux()
    716  last > rb->aux_nr_pages; rb->aux_nr_pages++)   in rb_alloc_aux()

/kernel/trace/
Kconfig
    268  and last enabled.
    720  last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
    721  last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
    722  last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
    723  last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
    724  last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
    725  last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

trace.c
    2017  struct tracer *t, **last;                     in init_trace_selftests() local
    2040  last = &trace_types;                          in init_trace_selftests()
    2043  *last = t->next;                              in init_trace_selftests()
    2046  last = &t->next;                              in init_trace_selftests()
    9224  union trace_eval_map_item **last = &trace_eval_maps;  in trace_module_remove_evals() local
    9237  last = &map->tail.next;                       in trace_module_remove_evals()
    9243  *last = trace_eval_jmp_to_tail(map)->tail.next;  in trace_module_remove_evals()

/kernel/cgroup/
cgroup.c
    4269  unsigned long last = cfile->notified_at;      in cgroup_file_notify() local
    4270  unsigned long next = last + CGROUP_FILE_NOTIFY_MIN_INTV;  in cgroup_file_notify()
    4272  if (time_in_range(jiffies, last, next)) {     in cgroup_file_notify()
    4413  struct cgroup_subsys_state *last, *tmp;       in css_rightmost_descendant() local
    4418  last = pos;                                   in css_rightmost_descendant()
    4421  css_for_each_child(tmp, last)                 in css_rightmost_descendant()
    4425  return last;                                  in css_rightmost_descendant()
    4431  struct cgroup_subsys_state *last;             in css_leftmost_descendant() local
    4434  last = pos;                                   in css_leftmost_descendant()
    4438  return last;                                  in css_leftmost_descendant()
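
cgroup_file_notify() rate-limits notifications: it remembers when the last one fired and, while jiffies is still inside [last, last + CGROUP_FILE_NOTIFY_MIN_INTV], holds off instead of firing again. A rough userspace sketch of that window check (monotonic milliseconds instead of jiffies; the kernel's time_in_range() is additionally wrap-safe, and the kernel defers the swallowed notification rather than dropping it):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define NOTIFY_MIN_INTV_MS 100

    static long now_ms(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* Fire at most once per NOTIFY_MIN_INTV_MS window. */
    static bool notify(long *last_ms)
    {
            long now = now_ms();

            if (now - *last_ms < NOTIFY_MIN_INTV_MS)
                    return false;       /* inside the window: skip */
            *last_ms = now;
            return true;                /* fire, restart the window */
    }

    int main(void)
    {
            long last = -NOTIFY_MIN_INTV_MS;    /* first call always fires */
            int fired = 0, i;

            for (i = 0; i < 5; i++)
                    fired += notify(&last);
            printf("fired %d of 5 back-to-back notifications\n", fired);
            return 0;
    }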

/kernel/rcu/
Kconfig
    173  bool "Accelerate last non-dyntick-idle CPU's grace periods"