/kernel/ |
D | user_namespace.c |
    225  u32 first, last, id2;  in cmp_map_id() local
    233  first = el->lower_first;  in cmp_map_id()
    235  first = el->first;  in cmp_map_id()
    237  last = first + el->count - 1;  in cmp_map_id()
    239  if (key->id >= first && key->id <= last &&  in cmp_map_id()
    240  (id2 >= first && id2 <= last))  in cmp_map_id()
    243  if (key->id < first || id2 < first)  in cmp_map_id()
    275  u32 first, last, id2;  in map_id_range_down_base() local
    281  first = map->extent[idx].first;  in map_id_range_down_base()
    282  last = first + map->extent[idx].count - 1;  in map_id_range_down_base()
    [all …]
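The cmp_map_id() and map_id_range_down_base() hits above turn on the same arithmetic: an extent maps `count` consecutive IDs starting at `first`, so its last mapped ID is `first + count - 1`. A minimal standalone sketch of that containment test, using an illustrative struct rather than the kernel's uid_gid_extent:

    #include <stdio.h>

    /* Illustrative extent: maps `count` consecutive IDs starting at `first`. */
    struct id_extent {
            unsigned int first;
            unsigned int count;
    };

    /* Mirror of the first/last arithmetic seen in cmp_map_id():
     * the last mapped ID is first + count - 1. */
    static int extent_contains(const struct id_extent *el, unsigned int id)
    {
            unsigned int first = el->first;
            unsigned int last = first + el->count - 1;

            return id >= first && id <= last;
    }

    int main(void)
    {
            struct id_extent el = { .first = 1000, .count = 65536 };

            printf("%d\n", extent_contains(&el, 1000));   /* 1: first mapped ID */
            printf("%d\n", extent_contains(&el, 66535));  /* 1: last mapped ID */
            printf("%d\n", extent_contains(&el, 66536));  /* 0: one past the end */
            return 0;
    }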
|
D | async.c |
    83  struct async_entry *first = NULL;  in lowest_in_progress() local
    91  first = list_first_entry(&domain->pending,  in lowest_in_progress()
    95  first = list_first_entry(&async_global_pending,  in lowest_in_progress()
    99  if (first)  in lowest_in_progress()
    100  ret = first->cookie;  in lowest_in_progress()
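lowest_in_progress() relies on the pending lists being kept in ascending cookie order, so the first entry carries the lowest cookie still outstanding. A rough userspace sketch of that idea, assuming a plain cookie-sorted singly linked list instead of the kernel's list_head machinery (entry type and sentinel value are invented):

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative stand-in for struct async_entry: entries are kept
     * sorted by ascending cookie, so the head is the lowest in progress. */
    struct entry {
            unsigned long long cookie;
            struct entry *next;
    };

    #define NEXT_COOKIE 1000ULL     /* invented sentinel: nothing pending */

    static unsigned long long lowest_in_progress(const struct entry *pending)
    {
            const struct entry *first = pending;    /* head == lowest cookie */

            return first ? first->cookie : NEXT_COOKIE;
    }

    int main(void)
    {
            struct entry b = { .cookie = 7, .next = NULL };
            struct entry a = { .cookie = 3, .next = &b };

            printf("%llu\n", lowest_in_progress(&a));       /* 3 */
            printf("%llu\n", lowest_in_progress(NULL));     /* 1000 */
            return 0;
    }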
|
D | resource.c |
    783  struct resource *first, *next;  in __insert_resource() local
    785  for (;; parent = first) {  in __insert_resource()
    786  first = __request_resource(parent, new);  in __insert_resource()
    787  if (!first)  in __insert_resource()
    788  return first;  in __insert_resource()
    790  if (first == parent)  in __insert_resource()
    791  return first;  in __insert_resource()
    792  if (WARN_ON(first == new)) /* duplicated insertion */  in __insert_resource()
    793  return first;  in __insert_resource()
    795  if ((first->start > new->start) || (first->end < new->end))  in __insert_resource()
    [all …]
|
D | pid.c |
    49  { .first = NULL },
    50  { .first = NULL },
    51  { .first = NULL },
    333  struct hlist_node *first;  in pid_task() local
    334  first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),  in pid_task()
    336  if (first)  in pid_task()
    337  result = hlist_entry(first, struct task_struct, pid_links[(type)]);  in pid_task()
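pid_task() loads the hlist head's `first` pointer and, when it is non-NULL, converts it back to the enclosing task_struct. Stripped of the RCU annotations (the real code wraps the load in rcu_dereference_check()), the pattern is just a container_of() on the first node. A self-contained sketch with invented types:

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal hlist-style head: just a `first` pointer, as in
     * struct hlist_head.  RCU protection is deliberately omitted. */
    struct hnode { struct hnode *next; };
    struct hhead { struct hnode *first; };

    struct task {
            int pid;
            struct hnode link;
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Same shape as pid_task(): take head->first, map back to the task. */
    static struct task *first_task(struct hhead *head)
    {
            struct hnode *first = head->first;

            return first ? container_of(first, struct task, link) : NULL;
    }

    int main(void)
    {
            struct task t = { .pid = 42 };
            struct hhead head = { .first = &t.link };

            printf("%d\n", first_task(&head)->pid); /* 42 */
            return 0;
    }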
|
D | taskstats.c |
    197  struct task_struct *tsk, *first;  in fill_stats_for_tgid() local
    208  first = find_task_by_vpid(tgid);  in fill_stats_for_tgid()
    210  if (!first || !lock_task_sighand(first, &flags))  in fill_stats_for_tgid()
    213  if (first->signal->stats)  in fill_stats_for_tgid()
    214  memcpy(stats, first->signal->stats, sizeof(*stats));  in fill_stats_for_tgid()
    218  tsk = first;  in fill_stats_for_tgid()
    243  } while_each_thread(first, tsk);  in fill_stats_for_tgid()
    245  unlock_task_sighand(first, &flags);  in fill_stats_for_tgid()
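fill_stats_for_tgid() keeps `first` as the loop anchor: while_each_thread(first, tsk) advances tsk around the thread group's circular list until it wraps back to `first`. A toy version of that accumulate-over-the-group walk, with a hypothetical cpu_time field standing in for the real per-thread statistics:

    #include <stdio.h>

    /* Illustrative thread: threads of one group form a circular list,
     * which is what while_each_thread(first, tsk) walks until it wraps
     * back to `first`. */
    struct thread {
            unsigned long cpu_time;
            struct thread *next;    /* circular */
    };

    static unsigned long sum_group_time(struct thread *first)
    {
            struct thread *tsk = first;
            unsigned long total = 0;

            do {
                    total += tsk->cpu_time;
                    tsk = tsk->next;
            } while (tsk != first); /* stop once we wrap to `first` */

            return total;
    }

    int main(void)
    {
            struct thread c = { .cpu_time = 3 };
            struct thread b = { .cpu_time = 2, .next = &c };
            struct thread a = { .cpu_time = 1, .next = &b };

            c.next = &a;    /* close the circle */
            printf("%lu\n", sum_group_time(&a));    /* 6 */
            return 0;
    }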
|
D | user.c |
    33  .first = 0,
    43  .first = 0,
    53  .first = 0,
|
D | sysctl.c |
    2349  int *i, vleft, first = 1, err = 0;  in __do_proc_dointvec() local
    2376  for (; left && vleft--; i++, first=0) {  in __do_proc_dointvec()
    2399  if (!first)  in __do_proc_dointvec()
    2409  if (!write && !first && left && !err)  in __do_proc_dointvec()
    2415  if (first)  in __do_proc_dointvec()
    2863  int vleft, first = 1, err = 0;  in __do_proc_doulongvec_minmax() local
    2889  for (; left && vleft--; i++, first = 0) {  in __do_proc_doulongvec_minmax()
    2914  if (!first) {  in __do_proc_doulongvec_minmax()
    2925  if (!write && !first && left && !err)  in __do_proc_doulongvec_minmax()
    2931  if (first)  in __do_proc_doulongvec_minmax()
    [all …]
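In both __do_proc_dointvec() and __do_proc_doulongvec_minmax(), `first` is cleared by the for-loop increment once one value has been handled, so it does double duty: it suppresses the separator before the first value and, if still set at the end, signals that the caller supplied nothing parseable. A rough userspace analogue of that flag (parse_intvec() and its buffer handling are invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* `first` is cleared by the loop increment after the first value,
     * so a set flag at the end means "nothing was parsed" - an error,
     * mirroring the `if (first)` checks in __do_proc_dointvec(). */
    static int parse_intvec(const char *buf, int *vec, int vleft)
    {
            int first = 1;
            char *end;

            for (; *buf && vleft--; first = 0) {
                    long val = strtol(buf, &end, 10);

                    if (end == buf)
                            break;          /* no more digits */
                    *vec++ = (int)val;
                    buf = end;
                    while (*buf == ' ' || *buf == '\t')
                            buf++;          /* skip separators */
            }
            if (first)
                    return -EINVAL;         /* nothing parsed at all */
            return 0;
    }

    int main(void)
    {
            int v[4];

            printf("%d\n", parse_intvec("10 20 30", v, 4)); /* 0 */
            printf("%d %d %d\n", v[0], v[1], v[2]);         /* 10 20 30 */
            printf("%d\n", parse_intvec("junk", v, 4));     /* -EINVAL */
            return 0;
    }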
|
D | signal.c |
    565  struct sigqueue *q, *first = NULL;  in collect_signal() local
    573  if (first)  in collect_signal()
    575  first = q;  in collect_signal()
    581  if (first) {  in collect_signal()
    583  list_del_init(&first->list);  in collect_signal()
    584  copy_siginfo(info, &first->info);  in collect_signal()
    587  (first->flags & SIGQUEUE_PREALLOC) &&  in collect_signal()
    591  __sigqueue_free(first);  in collect_signal()
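collect_signal() scans the queue for the first sigqueue entry matching the signal, keeps scanning to learn whether another match remains pending, then unlinks the first match and copies out its info. A simplified sketch of that scan over a singly linked list (the kernel uses list_head and additionally handles SIGQUEUE_PREALLOC entries):

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative queued signal entry. */
    struct sigent {
            int signo;
            struct sigent *next;
    };

    /* Unlink and return the first entry matching `sig`, setting
     * *still_pending when a second match exists - the shape of the
     * first/q scan in collect_signal(). */
    static struct sigent *collect(struct sigent **list, int sig,
                                  int *still_pending)
    {
            struct sigent **pp = list, **first_pp = NULL, *first = NULL;

            *still_pending = 0;
            for (struct sigent *q = *list; q; pp = &q->next, q = q->next) {
                    if (q->signo != sig)
                            continue;
                    if (first) {            /* second match: leave it queued */
                            *still_pending = 1;
                            break;
                    }
                    first = q;
                    first_pp = pp;
            }
            if (first)
                    *first_pp = first->next;  /* unlink, as list_del_init() does */
            return first;
    }

    int main(void)
    {
            struct sigent c = { .signo = 10 };
            struct sigent b = { .signo = 12, .next = &c };
            struct sigent a = { .signo = 10, .next = &b };
            struct sigent *head = &a;
            int more;

            struct sigent *got = collect(&head, 10, &more);
            printf("signo=%d more=%d\n", got->signo, more); /* signo=10 more=1 */
            return 0;
    }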
|
D | workqueue.c |
    2497  bool first = true;  in rescuer_thread() local
    2515  if (first)  in rescuer_thread()
    2519  first = false;  in rescuer_thread()
    4772  bool first = true;  in show_workqueue_state() local
    4787  pr_cont(" %s%d", first ? "idle: " : "",  in show_workqueue_state()
    4789  first = false;  in show_workqueue_state()
|
/kernel/bpf/ |
D | percpu_freelist.c |
    18  head->first = NULL;  in pcpu_freelist_init()
    32  node->next = head->first;  in ___pcpu_freelist_push()
    33  head->first = node;  in ___pcpu_freelist_push()
    94  node = head->first;  in __pcpu_freelist_pop()
    96  head->first = node->next;  in __pcpu_freelist_pop()
|
D | percpu_freelist.h | 10 struct pcpu_freelist_node *first; member
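Together, the two percpu_freelist entries describe a plain LIFO stack: `first` is the top-of-stack pointer, push links the new node in front of it, pop detaches it. A self-contained sketch of the same shape - note the kernel version is per-CPU and takes the head's raw spinlock around these steps, which is omitted here:

    #include <stdio.h>
    #include <stddef.h>

    /* Same shape as struct pcpu_freelist_head/node: `first` is the
     * top of a LIFO stack of intrusively linked nodes. */
    struct fl_node { struct fl_node *next; };
    struct fl_head { struct fl_node *first; };

    static void fl_push(struct fl_head *head, struct fl_node *node)
    {
            node->next = head->first;       /* as in ___pcpu_freelist_push() */
            head->first = node;
    }

    static struct fl_node *fl_pop(struct fl_head *head)
    {
            struct fl_node *node = head->first;  /* as in __pcpu_freelist_pop() */

            if (node)
                    head->first = node->next;
            return node;
    }

    int main(void)
    {
            struct fl_head head = { .first = NULL };  /* pcpu_freelist_init() */
            struct fl_node a, b;

            fl_push(&head, &a);
            fl_push(&head, &b);
            printf("%d\n", fl_pop(&head) == &b);    /* 1: LIFO order */
            printf("%d\n", fl_pop(&head) == &a);    /* 1 */
            printf("%d\n", fl_pop(&head) == NULL);  /* 1: empty */
            return 0;
    }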
|
/kernel/sched/ |
D | topology.c |
    563  struct sched_group *tmp, *first;  in free_sched_groups() local
    568  first = sg;  in free_sched_groups()
    578  } while (sg != first);  in free_sched_groups()
    932  struct sched_group *first = NULL, *last = NULL, *sg;  in build_overlap_sched_groups() local
    971  if (!first)  in build_overlap_sched_groups()
    972  first = sg;  in build_overlap_sched_groups()
    976  last->next = first;  in build_overlap_sched_groups()
    978  sd->groups = first;  in build_overlap_sched_groups()
    983  free_sched_groups(first, 0);  in build_overlap_sched_groups()
    1107  struct sched_group *first = NULL, *last = NULL;  in build_sched_groups()
    [all …]
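The topology.c hits show two halves of one pattern: the build functions thread `first`/`last` through their loops and finally link last->next back to first, producing a circular list, while free_sched_groups() walks that ring until it returns to `first`. A minimal sketch of both halves with an invented group type (the ring is assumed non-empty):

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative group node; sched groups form a circular singly
     * linked list. */
    struct group {
            int id;
            struct group *next;
    };

    /* Link the array into a ring, the way the build functions thread
     * first/last through their loops. */
    static struct group *build_ring(struct group *g, int n)
    {
            struct group *first = NULL, *last = NULL;

            for (int i = 0; i < n; i++) {
                    if (!first)
                            first = &g[i];
                    if (last)
                            last->next = &g[i];
                    last = &g[i];
            }
            last->next = first;     /* close the ring */
            return first;
    }

    int main(void)
    {
            struct group g[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
            struct group *first = build_ring(g, 3), *sg = first;

            do {    /* walk until back at `first`, as free_sched_groups() does */
                    printf("%d ", sg->id);
                    sg = sg->next;
            } while (sg != first);
            printf("\n");   /* 0 1 2 */
            return 0;
    }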
|
D | stop_task.c | 32 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
|
D | idle.c | 388 static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first) in set_next_task_idle() argument
|
D | rt.c |
    1564  static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)  in set_next_task_rt() argument
    1571  if (!first)  in set_next_task_rt()
|
D | deadline.c |
    1746  static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)  in set_next_task_dl() argument
    1753  if (!first)  in set_next_task_dl()
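The set_next_task_stop/idle/rt/dl hits above share one signature: each scheduling class receives a bool `first`, and (as the `if (!first)` lines in rt.c and deadline.c suggest) uses it to gate work that should only run when the task has just been picked by the scheduler core. A schematic miniature of that shape - the struct fields and the gated action are invented stand-ins, not the kernel's:

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical miniature of the set_next_task_*() pattern: common
     * bookkeeping always runs, the tail is gated on `first`. */
    struct rq { unsigned long clock; };
    struct task { unsigned long exec_start; };

    static void set_next_task_demo(struct rq *rq, struct task *p, bool first)
    {
            p->exec_start = rq->clock;      /* unconditional bookkeeping */

            if (!first)
                    return;                 /* the rt.c/deadline.c shape */

            printf("queueing pick-time-only work\n");
    }

    int main(void)
    {
            struct rq rq = { .clock = 100 };
            struct task p = { 0 };

            set_next_task_demo(&rq, &p, true);      /* prints */
            set_next_task_demo(&rq, &p, false);     /* silent */
            return 0;
    }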
|
/kernel/cgroup/ |
D | debug.c |
    241  bool first = true;  in cgroup_masks_read_one() local
    247  if (!first)  in cgroup_masks_read_one()
    250  first = false;  in cgroup_masks_read_one()
|
/kernel/trace/ |
D | trace_output.c |
    72  int i, first = 1;  in trace_print_flags_seq() local
    82  if (!first && delim)  in trace_print_flags_seq()
    85  first = 0;  in trace_print_flags_seq()
    91  if (!first && delim)  in trace_print_flags_seq()
    136  int i, first = 1;  in trace_print_flags_seq_u64() local
    146  if (!first && delim)  in trace_print_flags_seq_u64()
    149  first = 0;  in trace_print_flags_seq_u64()
    155  if (!first && delim)  in trace_print_flags_seq_u64()
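trace_print_flags_seq() uses the classic first-element separator idiom - emit the delimiter before every match except the first - which also shows up in the workqueue.c, cgroup/debug.c and trace_events.c entries. A compact standalone version (the bit names printed are invented):

    #include <stdio.h>

    /* Print each set bit, placing `delim` before every element
     * except the first - the trace_print_flags_seq() idiom. */
    static void print_flags(unsigned long flags, const char *delim)
    {
            int first = 1;

            for (int i = 0; i < 8 * (int)sizeof(flags); i++) {
                    if (!(flags & (1UL << i)))
                            continue;
                    if (!first && delim)
                            fputs(delim, stdout);
                    first = 0;
                    printf("bit%d", i);
            }
            putchar('\n');
    }

    int main(void)
    {
            print_flags(0x15, "|");  /* bit0|bit2|bit4 */
            return 0;
    }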
|
D | Kconfig |
    167  Its first purpose is to trace the duration of functions and
    175  Enables hooks which will be called when preemption is first disabled,
    688  "START". The second string records the cold cache time of the first
    697  first=3672 [COLD CACHED]
    698  last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
    699  last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
    700  last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
    701  last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
    702  last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
    703  last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
|
D | trace_event_perf.c | 454 head.first = &event->hlist_entry; in perf_ftrace_function_call()
|
D | trace_events.c |
    2206  bool first = false;  in trace_event_eval_update() local
    2214  first = true;  in trace_event_eval_update()
    2232  if (first) {  in trace_event_eval_update()
    2234  first = false;  in trace_event_eval_update()
|
/kernel/locking/ |
D | mutex.c |
    931  bool first = false;  in __mutex_lock_common() local
    1039  if ((use_ww_ctx && ww_ctx) || !first) {  in __mutex_lock_common()
    1040  first = __mutex_waiter_is_first(lock, &waiter);  in __mutex_lock_common()
    1041  if (first)  in __mutex_lock_common()
    1052  (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))  in __mutex_lock_common()
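In __mutex_lock_common(), `first` records whether our waiter has reached the head of the FIFO wait list; only the first waiter takes the handoff/optimistic-spin path. __mutex_waiter_is_first() is essentially a head-of-list comparison, sketched here over a deliberately simplified lock structure (the kernel's wait_list is a list_head, and the check runs under the wait lock):

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative FIFO wait list: the head is the oldest waiter. */
    struct waiter { struct waiter *next; };
    struct lock { struct waiter *wait_list; };

    /* "Is this waiter at the head of the wait list?" - the essence
     * of __mutex_waiter_is_first(). */
    static bool waiter_is_first(struct lock *lock, struct waiter *w)
    {
            return lock->wait_list == w;
    }

    int main(void)
    {
            struct waiter w2 = { .next = NULL };
            struct waiter w1 = { .next = &w2 };
            struct lock lock = { .wait_list = &w1 };

            printf("%d %d\n", waiter_is_first(&lock, &w1),  /* 1 */
                              waiter_is_first(&lock, &w2)); /* 0 */
            return 0;
    }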
|
/kernel/debug/kdb/ |
D | kdb_cmds | 7 # Standard debugging information for first level support, just type archkdb
|
/kernel/time/ |
D | timer.c |
    1435  timer = hlist_entry(head->first, struct timer_list, entry);  in expire_timers()
    1951  timer = hlist_entry(head->first, struct timer_list, entry);  in migrate_timer_list()
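Both timer.c hits take the bucket's current head entry via head->first; migrate_timer_list() does so in a loop, draining one bucket into another. A minimal sketch of that drain loop over an hlist-like head (node/head types are illustrative, and the container_of step is elided):

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal hlist-ish bucket: drain it by repeatedly detaching
     * head->first, the loop shape of migrate_timer_list(). */
    struct node { struct node *next; };
    struct head { struct node *first; };

    static void drain(struct head *from, struct head *to)
    {
            while (from->first) {
                    struct node *n = from->first;

                    from->first = n->next;  /* detach the current first entry */
                    n->next = to->first;    /* re-add at the front of `to` */
                    to->first = n;
            }
    }

    int main(void)
    {
            struct node b = { .next = NULL };
            struct node a = { .next = &b };
            struct head from = { .first = &a }, to = { .first = NULL };

            drain(&from, &to);
            printf("%d %d\n", from.first == NULL, to.first == &b);  /* 1 1 */
            return 0;
    }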
|
/kernel/power/ |
D | Kconfig | 102 suspended image to. It will simply pick the first available swap
|