/kernel/ |
D | exec_domain.c |
      64  for (ep = exec_domains; ep; ep = ep->next) {  in lookup_exec_domain()
      75  for (ep = exec_domains; ep; ep = ep->next) {  in lookup_exec_domain()
      97  if (ep->next != NULL)  in register_exec_domain()
     101  for (tmp = exec_domains; tmp; tmp = tmp->next) {  in register_exec_domain()
     106  ep->next = exec_domains;  in register_exec_domain()
     122  for (epp = &exec_domains; *epp; epp = &(*epp)->next) {  in unregister_exec_domain()
     130  *epp = ep->next;  in unregister_exec_domain()
     131  ep->next = NULL;  in unregister_exec_domain()
     184  for (ep = exec_domains; ep; ep = ep->next)  in execdomains_proc_show()
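The unregister_exec_domain() hits at 122 and 130-131 are the classic pointer-to-pointer unlink: walking with a pointer to each `next` link makes head and interior removal a single code path. A minimal userspace sketch of the idiom, with a toy struct standing in for the kernel's exec_domain:

```c
#include <stddef.h>

/* Toy stand-in for the kernel's struct exec_domain. */
struct exec_domain {
	const char *name;
	struct exec_domain *next;
};

static struct exec_domain *exec_domains;

/* Walk with a pointer to the link itself (epp), so removing the head
 * and removing an interior node are the same splice: *epp = ep->next. */
static int unregister_exec_domain(struct exec_domain *ep)
{
	struct exec_domain **epp;

	for (epp = &exec_domains; *epp; epp = &(*epp)->next) {
		if (*epp == ep) {
			*epp = ep->next;
			ep->next = NULL;
			return 0;
		}
	}
	return -1; /* not registered */
}
```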
|
D | resource.c |
      94  .next = r_next,
     372  struct resource *first, *next;  in __insert_resource() local
     388  for (next = first; ; next = next->sibling) {  in __insert_resource()
     390  if (next->start < new->start || next->end > new->end)  in __insert_resource()
     391  return next;  in __insert_resource()
     392  if (!next->sibling)  in __insert_resource()
     394  if (next->sibling->start > new->end)  in __insert_resource()
     399  new->sibling = next->sibling;  in __insert_resource()
     402  next->sibling = NULL;  in __insert_resource()
     403  for (next = first; next; next = next->sibling)  in __insert_resource()
     [all …]
|
D | softirq.c |
     341  t->next = NULL;  in __tasklet_schedule()
     343  __get_cpu_var(tasklet_vec).tail = &(t->next);  in __tasklet_schedule()
     355  t->next = NULL;  in __tasklet_hi_schedule()
     357  __get_cpu_var(tasklet_hi_vec).tail = &(t->next);  in __tasklet_hi_schedule()
     377  list = list->next;  in tasklet_action()
     391  t->next = NULL;  in tasklet_action()
     393  __get_cpu_var(tasklet_vec).tail = &(t->next);  in tasklet_action()
     412  list = list->next;  in tasklet_hi_action()
     426  t->next = NULL;  in tasklet_hi_action()
     428  __get_cpu_var(tasklet_hi_vec).tail = &(t->next);  in tasklet_hi_action()
     [all …]
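The `tail = &(t->next)` stores above are the tail-pointer form of a singly linked queue: keeping a pointer to the last `next` link gives O(1) append with no empty-list special case. A hedged sketch of the shape (the struct and function names are illustrative, not the kernel's):

```c
#include <stddef.h>

struct tasklet {
	struct tasklet *next;
	void (*func)(void *data);
	void *data;
};

/* Queue kept as (head, tail) where tail always points at the link the
 * next append must patch -- initially &head itself. */
struct tasklet_vec {
	struct tasklet *head;
	struct tasklet **tail;
};

static void vec_init(struct tasklet_vec *v)
{
	v->head = NULL;
	v->tail = &v->head;
}

static void vec_append(struct tasklet_vec *v, struct tasklet *t)
{
	t->next = NULL;
	*v->tail = t;        /* link in at the end, O(1) */
	v->tail = &t->next;  /* future appends patch t->next */
}
```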
|
D | sched_rt.c |
     262  goto next;  in do_balance_runtime()
     281  next:  in do_balance_runtime()
     868  struct sched_rt_entity *next = NULL;  in pick_next_rt_entity() local
     876  next = list_entry(queue->next, struct sched_rt_entity, run_list);  in pick_next_rt_entity()
     878  return next;  in pick_next_rt_entity()
     931  struct task_struct *next = NULL;  in pick_next_highest_task_rt() local
     943  if (next && next->prio < idx)  in pick_next_highest_task_rt()
     948  next = p;  in pick_next_highest_task_rt()
     952  if (!next) {  in pick_next_highest_task_rt()
     958  return next;  in pick_next_highest_task_rt()
     [all …]
|
D | printk.c |
     447  for (con = console_drivers; con; con = con->next) {  in __call_console_drivers()
     579  for (con = console_drivers; con; con = con->next)  in have_callable_console()
    1106  for (c = console_drivers; c != NULL; c = c->next)  in console_unblank()
    1121  for (c = console_drivers; c != NULL; c = c->next) {  in console_device()
    1251  console->next = console_drivers;  in register_console()
    1253  if (console->next)  in register_console()
    1254  console->next->flags &= ~CON_CONSDEV;  in register_console()
    1256  console->next = console_drivers->next;  in register_console()
    1257  console_drivers->next = console;  in register_console()
    1284  console_drivers=console->next;  in unregister_console()
     [all …]
|
D | notifier.c |
      27  nl = &((*nl)->next);  in notifier_chain_register()
      29  n->next = *nl;  in notifier_chain_register()
      42  nl = &((*nl)->next);  in notifier_chain_cond_register()
      44  n->next = *nl;  in notifier_chain_cond_register()
      54  rcu_assign_pointer(*nl, n->next);  in notifier_chain_unregister()
      57  nl = &((*nl)->next);  in notifier_chain_unregister()
      84  next_nb = rcu_dereference(nb->next);  in notifier_call_chain()
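notifier_chain_register() uses the same `**nl` walk, but to insert in priority order rather than to delete. A simplified sketch of that ordered insert (the two-field struct is illustrative; the real notifier_block carries a callback and RCU publication):

```c
struct notifier {
	int priority;
	struct notifier *next;
};

/* Skip past entries of higher-or-equal priority, then splice the new
 * node in front of the remainder of the chain.  Equal priorities keep
 * registration order. */
static void chain_register(struct notifier **nl, struct notifier *n)
{
	while (*nl && (*nl)->priority >= n->priority)
		nl = &(*nl)->next;
	n->next = *nl;
	*nl = n;
}
```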
|
D | lockdep.c |
    1186  struct held_lock *next,  in print_bad_irq_dependency() argument
    1205  print_lock(next);  in print_bad_irq_dependency()
    1212  print_lock_name(hlock_class(next));  in print_bad_irq_dependency()
    1246  struct held_lock *next, enum lock_usage_bit bit_backwards,  in check_usage() argument
    1258  ret = find_usage_forwards(hlock_class(next), 0);  in check_usage()
    1262  return print_bad_irq_dependency(curr, prev, next,  in check_usage()
    1268  struct held_lock *next)  in check_prev_add_irq() argument
    1276  if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,  in check_prev_add_irq()
    1286  if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,  in check_prev_add_irq()
    1296  if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,  in check_prev_add_irq()
     [all …]
|
D | rtmutex.c |
     306  struct rt_mutex_waiter *next;  in try_to_steal_lock() local
     332  next = rt_mutex_top_waiter(lock);  in try_to_steal_lock()
     333  plist_del(&next->pi_list_entry, &pendowner->pi_waiters);  in try_to_steal_lock()
     351  if (likely(next->task != current)) {  in try_to_steal_lock()
     353  plist_add(&next->pi_list_entry, &current->pi_waiters);  in try_to_steal_lock()
     519  struct rt_mutex_waiter *next;  in wakeup_next_waiter() local
     521  next = rt_mutex_top_waiter(lock);  in wakeup_next_waiter()
     522  plist_add(&next->pi_list_entry, &pendowner->pi_waiters);  in wakeup_next_waiter()
     555  struct rt_mutex_waiter *next;  in remove_waiter() local
     557  next = rt_mutex_top_waiter(lock);  in remove_waiter()
     [all …]
|
D | sched_stats.h |
     252  __sched_info_switch(struct task_struct *prev, struct task_struct *next)  in __sched_info_switch() argument
     264  if (next != rq->idle)  in __sched_info_switch()
     265  sched_info_arrive(next);  in __sched_info_switch()
     268  sched_info_switch(struct task_struct *prev, struct task_struct *next)  in sched_info_switch() argument
     271  __sched_info_switch(prev, next);  in sched_info_switch()
     277  #define sched_info_switch(t, next) do { } while (0)  argument
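The stub at 277 shows why config-off macros are written `do { } while (0)` rather than as a bare block: both the real multi-statement body and the stub then expand to exactly one statement that demands a trailing semicolon, so call sites parse identically under either configuration. A compilable illustration (the macro name is a hypothetical stand-in):

```c
#include <stdio.h>

/* Config-off stub.  The configured-in version would be a
 * multi-statement body; wrapping both forms in do { } while (0)
 * makes each expand to one statement requiring a trailing ';'. */
#define trace_switch(prev, next) do { } while (0)

/* If the real version were a bare block "{ stmt1; stmt2; }", then
 * "if (c) trace_switch(a, b); else ..." would not parse: the ';'
 * after the block would end the if-statement and orphan the else. */
int main(void)
{
	int busy = 1;

	if (busy)
		trace_switch(0, 1);
	else
		printf("idle\n");

	return 0;
}
```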
|
D | futex.c |
     471  struct list_head *next, *head = &curr->pi_state_list;  in exit_pi_state_list() local
     486  next = head->next;  in exit_pi_state_list()
     487  pi_state = list_entry(next, struct futex_pi_state, list);  in exit_pi_state_list()
     499  if (head->next != next) {  in exit_pi_state_list()
     524  struct futex_q *this, *next;  in lookup_pi_state() local
     531  plist_for_each_entry_safe(this, next, head, list) {  in lookup_pi_state()
     734  struct futex_q *this, *next;  in futex_wake() local
     750  plist_for_each_entry_safe(this, next, head, list) {  in futex_wake()
     784  struct futex_q *this, *next;  in futex_wake_op() local
     847  plist_for_each_entry_safe(this, next, head, list) {  in futex_wake_op()
     [all …]
|
D | audit_tree.c |
     431  struct audit_krule *rule, *next;  in kill_rules() local
     435  list_for_each_entry_safe(rule, next, &tree->rules, rlist) {  in kill_rules()
     468  p = list_entry(victim->chunks.next, struct node, list);  in prune_one()
     487  for (p = tree->chunks.next; p != &tree->chunks; p = q) {  in trim_marked()
     489  q = p->next;  in trim_marked()
     499  node = list_entry(tree->chunks.next, struct node, list);  in trim_marked()
     551  while (cursor.next != &tree_list) {  in audit_trim_trees()
     559  tree = container_of(cursor.next, struct audit_tree, list);  in audit_trim_trees()
     744  while (cursor.next != &tree_list) {  in audit_tag_tree()
     748  tree = container_of(cursor.next, struct audit_tree, list);  in audit_tag_tree()
     [all …]
|
D | sched_fair.c |
     727  if (cfs_rq->next == se)  in __clear_buddies()
     728  cfs_rq->next = NULL;  in __clear_buddies()
     824  if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)  in pick_next_entity()
     825  return cfs_rq->next;  in pick_next_entity()
    1372  cfs_rq_of(se)->next = se;  in set_next_buddy()
    1509  __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)  in __load_balance_iterator() argument
    1514  if (next == &cfs_rq->tasks)  in __load_balance_iterator()
    1517  se = list_entry(next, struct sched_entity, group_node);  in __load_balance_iterator()
    1519  cfs_rq->balance_iterator = next->next;  in __load_balance_iterator()
    1528  return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);  in load_balance_start_fair()
     [all …]
|
D | rcuclassic.c |
     133  head->next = NULL;  in __call_rcu()
     159  rdp->nxttail[2] = &head->next;  in __call_rcu()
     333  struct rcu_head *next, *list;  in rcu_do_batch() local
     338  next = list->next;  in rcu_do_batch()
     339  prefetch(next);  in rcu_do_batch()
     341  list = next;  in rcu_do_batch()
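rcu_do_batch() shows the standard way to consume a callback list: read `list->next` (and prefetch it) before invoking the callback, because the callback normally frees the node it is embedded in. A freestanding sketch of that consumption loop:

```c
/* Minimal rcu_head-like node: the callback frees or recycles it. */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

static void do_batch(struct rcu_head *list)
{
	while (list) {
		/* Capture the successor first: once func() runs, the
		 * node -- including its next field -- may be gone. */
		struct rcu_head *next = list->next;

		list->func(list);
		list = next;
	}
}
```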
|
D | sched.c |
     420  struct sched_entity *curr, *next, *last;  member
     865  # define prepare_arch_switch(next) do { } while (0)  argument
     882  static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)  in prepare_lock_switch() argument
     912  static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)  in prepare_lock_switch() argument
     920  next->oncpu = 1;  in prepare_lock_switch()
    1380  struct task_struct *(*next)(void *);  member
    1667  for (class = sched_class_highest; class; class = class->next)
    1851  (&p->se == cfs_rq_of(&p->se)->next ||  in task_hot()
    2146  } while (group = group->next, group != sd->groups);  in find_idlest_group()
    2526  struct task_struct *next)  in fire_sched_out_preempt_notifiers() argument
     [all …]
|
D | rcupreempt.c |
    1097  struct rcu_head *next, *list;  in rcu_process_callbacks() local
    1113  next = list->next;  in rcu_process_callbacks()
    1115  list = next;  in rcu_process_callbacks()
    1126  head->next = NULL;  in call_rcu()
    1132  rdp->nexttail = &head->next;  in call_rcu()
    1145  head->next = NULL;  in call_rcu_sched()
    1150  rdp->nextschedtail = &head->next;  in call_rcu_sched()
|
D | posix-cpu-timers.c |
     476  struct cpu_timer_list *timer, *next;  in cleanup_timers() local
     479  list_for_each_entry_safe(timer, next, head, entry) {  in cleanup_timers()
     490  list_for_each_entry_safe(timer, next, head, entry) {  in cleanup_timers()
     501  list_for_each_entry_safe(timer, next, head, entry) {  in cleanup_timers()
     554  struct cpu_timer_list *next;  in arm_timer() local
     566  list_for_each_entry(next, head, entry) {  in arm_timer()
     567  if (next->expires.sched > nt->expires.sched)  in arm_timer()
     569  listpos = &next->entry;  in arm_timer()
     572  list_for_each_entry(next, head, entry) {  in arm_timer()
     573  if (cputime_gt(next->expires.cpu, nt->expires.cpu))  in arm_timer()
     [all …]
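The `list_for_each_entry_safe()` calls in cleanup_timers() exist because the loop body deletes entries, so the iterator must cache the successor up front. The underlying shape, reduced to the raw list_head form (a sketch, not the kernel header):

```c
#include <stdlib.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

/* n caches pos->next before the body runs, so the body may unlink or
 * free pos without derailing the walk. */
#define list_for_each_safe(pos, n, head) \
	for ((pos) = (head)->next, (n) = (pos)->next; \
	     (pos) != (head); \
	     (pos) = (n), (n) = (pos)->next)

struct item {
	struct list_head entry;
	int payload;
};

static void free_all(struct list_head *head)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, head)
		free((char *)pos - offsetof(struct item, entry));

	head->next = head->prev = head; /* leave the list empty */
}
```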
|
/kernel/time/ |
D | tick-common.c |
      82  ktime_t next;  in tick_handle_periodic() local
      92  next = ktime_add(dev->next_event, tick_period);  in tick_handle_periodic()
      94  if (!clockevents_program_event(dev, next, ktime_get()))  in tick_handle_periodic()
      97  next = ktime_add(next, tick_period);  in tick_handle_periodic()
     117  ktime_t next;  in tick_setup_periodic() local
     121  next = tick_next_period;  in tick_setup_periodic()
     127  if (!clockevents_program_event(dev, next, ktime_get()))  in tick_setup_periodic()
     129  next = ktime_add(next, tick_period);  in tick_setup_periodic()
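tick_handle_periodic() arms the clock-event device one period past the last expiry; if that moment already lies in the past (the interrupt arrived late), it keeps sliding forward a period at a time until programming succeeds. A sketch of the retry loop using plain nanosecond integers, with hypothetical stand-ins for clockevents_program_event() and ktime_get():

```c
#include <stdint.h>
#include <stdbool.h>

typedef int64_t ktime_ns;

static ktime_ns fake_now; /* pretend monotonic clock */

/* Hypothetical stand-in: hardware can only be armed for the future. */
static bool program_event(ktime_ns expires, ktime_ns now)
{
	return expires > now;
}

static ktime_ns handle_periodic(ktime_ns last_expiry, ktime_ns period)
{
	ktime_ns next = last_expiry + period;

	/* A late interrupt can leave next in the past; advance by whole
	 * periods until the device accepts it, preserving alignment to
	 * the original period grid. */
	while (!program_event(next, fake_now))
		next += period;

	return next;
}
```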
|
D | ntp.c |
     226  struct timespec now, next;  in sync_cmos_clock() local
     247  next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);  in sync_cmos_clock()
     248  if (next.tv_nsec <= 0)  in sync_cmos_clock()
     249  next.tv_nsec += NSEC_PER_SEC;  in sync_cmos_clock()
     252  next.tv_sec = 659;  in sync_cmos_clock()
     254  next.tv_sec = 0;  in sync_cmos_clock()
     256  if (next.tv_nsec >= NSEC_PER_SEC) {  in sync_cmos_clock()
     257  next.tv_sec++;  in sync_cmos_clock()
     258  next.tv_nsec -= NSEC_PER_SEC;  in sync_cmos_clock()
     260  schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));  in sync_cmos_clock()
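sync_cmos_clock() aims the next CMOS write at the middle of a second and then renormalizes the timespec by hand. The arithmetic, extracted into a standalone helper (struct and function names changed to avoid clashing with libc; a sketch of the computation, not the kernel code):

```c
#define NSEC_PER_SEC 1000000000L

struct ts { long tv_sec; long tv_nsec; };

/* Delay from "now_nsec into the current second" to the next
 * half-second boundary, pulled back by half a tick so the write lands
 * centered; result normalized to 0 <= tv_nsec < NSEC_PER_SEC. */
static struct ts delay_to_half_second(long now_nsec, long tick_nsec)
{
	struct ts next;

	next.tv_sec = 0;
	next.tv_nsec = (NSEC_PER_SEC / 2) - now_nsec - (tick_nsec / 2);
	if (next.tv_nsec <= 0)
		next.tv_nsec += NSEC_PER_SEC; /* already past the boundary */
	if (next.tv_nsec >= NSEC_PER_SEC) {
		next.tv_sec++;
		next.tv_nsec -= NSEC_PER_SEC;
	}
	return next;
}
```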
|
D | clocksource.c |
     275  struct clocksource *next;  in select_clocksource() local
     281  next = clocksource_override;  in select_clocksource()
     283  next = list_entry(clocksource_list.next, struct clocksource,  in select_clocksource()
     286  if (next == curr_clocksource)  in select_clocksource()
     289  return next;  in select_clocksource()
|
D | timer_stats.c |
      57  struct entry *next;  member
     183  curr = curr->next;  in tstat_lookup()
     200  curr = curr->next;  in tstat_lookup()
     207  curr->next = NULL;  in tstat_lookup()
     213  prev->next = curr;  in tstat_lookup()
|
D | tick-sched.c |
     532  ktime_t next;  in tick_nohz_switch_to_nohz() local
     551  next = tick_init_jiffy_update();  in tick_nohz_switch_to_nohz()
     554  hrtimer_set_expires(&ts->sched_timer, next);  in tick_nohz_switch_to_nohz()
     555  if (!tick_program_event(next, 0))  in tick_nohz_switch_to_nohz()
     557  next = ktime_add(next, tick_period);  in tick_nohz_switch_to_nohz()
|
D | tick-broadcast.c |
     178  ktime_t next;  in tick_handle_periodic_broadcast() local
     195  for (next = dev->next_event; ;) {  in tick_handle_periodic_broadcast()
     196  next = ktime_add(next, tick_period);  in tick_handle_periodic_broadcast()
     198  if (!clockevents_program_event(dev, next, ktime_get()))  in tick_handle_periodic_broadcast()
|
/kernel/trace/ |
D | ftrace.c |
      89  op = op->next;  in ftrace_list_func()
     140  ops->next = ftrace_list;  in __register_ftrace_function()
     153  if (ops->next == &ftrace_list_end)  in __register_ftrace_function()
     192  if (ftrace_list == ops && ops->next == &ftrace_list_end) {  in __unregister_ftrace_function()
     198  for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)  in __unregister_ftrace_function()
     207  *p = (*p)->next;  in __unregister_ftrace_function()
     211  if (ftrace_list->next == &ftrace_list_end) {  in __unregister_ftrace_function()
     292  struct ftrace_page *next;  member
     360  for (pg = ftrace_pages_start; pg; pg = pg->next) {  in ftrace_release()
     391  if (!ftrace_pages->next) {  in ftrace_alloc_dyn_node()
     [all …]
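Unlike the other lists here, ftrace's chain ends at a static sentinel (`ftrace_list_end`) rather than NULL, so the hot call path never needs a NULL test and an "empty" list still points at something callable. A reduced sketch of the idea (type and function names are illustrative):

```c
struct ftrace_ops_s {
	void (*func)(unsigned long ip);
	struct ftrace_ops_s *next;
};

static void nop_func(unsigned long ip) { (void)ip; }

/* The sentinel terminates the chain and doubles as the empty list. */
static struct ftrace_ops_s list_end = { nop_func, &list_end };
static struct ftrace_ops_s *list_head = &list_end;

static void call_all(unsigned long ip)
{
	struct ftrace_ops_s *op = list_head;

	/* Termination is "reached the sentinel", never a NULL check. */
	while (op != &list_end) {
		op->func(ip);
		op = op->next;
	}
}

static void register_ops(struct ftrace_ops_s *ops)
{
	ops->next = list_head; /* push at the head */
	list_head = ops;
}
```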
|
D | trace_sched_switch.c |
      24  struct task_struct *next)  in probe_sched_switch() argument
      35  tracing_record_cmdline(next);  in probe_sched_switch()
      46  tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);  in probe_sched_switch()
|
/kernel/power/ |
D | snapshot.c |
     130  struct linked_page *next;  member
     138  struct linked_page *lp = list->next;  in free_list_of_pages()
     187  lp->next = ca->chain;  in chain_alloc()
     262  bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);  in memory_bm_position_reset()
     561  bb = list_entry(bb->hook.next, struct bm_block, hook);  in memory_bm_next_pfn()
    1507  struct highmem_pbe *next;  member
    1644  safe_pages_list = safe_pages_list->next;  in get_highmem_page_buffer()
    1647  pbe->next = highmem_pblist;  in get_highmem_page_buffer()
    1765  lp->next = sp_list;  in prepare_image()
    1780  lp->next = safe_pages_list;  in prepare_image()
     [all …]
|