
Searched refs:prev (Results 1 – 22 of 22) sorted by relevance

/kernel/
sched_stats.h
252 __sched_info_switch(struct task_struct *prev, struct task_struct *next) in __sched_info_switch() argument
254 struct rq *rq = task_rq(prev); in __sched_info_switch()
261 if (prev != rq->idle) in __sched_info_switch()
262 sched_info_depart(prev); in __sched_info_switch()
268 sched_info_switch(struct task_struct *prev, struct task_struct *next) in sched_info_switch() argument
271 __sched_info_switch(prev, next); in sched_info_switch()
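The sched_stats.h hits show the depart/arrive split around a context switch: schedstats are charged to the outgoing task only when it is not the idle task. A minimal user-space sketch of that shape (the task type, is_idle flag, and helper bodies are stand-ins, not the kernel's definitions):

struct task { int is_idle; };

static void sched_info_depart(struct task *t) { /* fold run time into stats */ }
static void sched_info_arrive(struct task *t) { /* start the wait clock */ }

static void sched_info_switch_sketch(struct task *prev, struct task *next)
{
	if (!prev->is_idle)		/* idle is never accounted (line 261) */
		sched_info_depart(prev);
	if (!next->is_idle)
		sched_info_arrive(next);
}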
lockdep.c
1185 struct held_lock *prev, in print_bad_irq_dependency() argument
1208 print_lock(prev); in print_bad_irq_dependency()
1210 print_lock_name(hlock_class(prev)); in print_bad_irq_dependency()
1245 check_usage(struct task_struct *curr, struct held_lock *prev, in check_usage() argument
1253 ret = find_usage_backwards(hlock_class(prev), 0); in check_usage()
1262 return print_bad_irq_dependency(curr, prev, next, in check_usage()
1267 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, in check_prev_add_irq() argument
1276 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ, in check_prev_add_irq()
1286 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ, in check_prev_add_irq()
1296 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ, in check_prev_add_irq()
[all …]
mutex.c
349 int prev; in __mutex_trylock_slowpath() local
353 prev = atomic_xchg(&lock->count, -1); in __mutex_trylock_slowpath()
354 if (likely(prev == 1)) { in __mutex_trylock_slowpath()
364 return prev == 1; in __mutex_trylock_slowpath()
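The mutex.c hits are the xchg-based trylock slowpath: swap -1 into the counter unconditionally, then succeed only if the previous value was 1 (unlocked). A compilable C11 analogue, assuming the kernel's 1/0/negative count convention and ignoring the waiter-list fixup that the real function performs:

#include <stdatomic.h>
#include <stdbool.h>

/* count: 1 = unlocked, 0 = locked, negative = locked with waiters. */
static bool trylock_slowpath_sketch(atomic_int *count)
{
	int prev = atomic_exchange(count, -1);	/* the atomic_xchg() at line 353 */

	if (prev == 1)			/* it was unlocked: we own it now */
		atomic_store(count, 0);	/* simplification: assumes no waiters */
	return prev == 1;		/* line 364: report the old value */
}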
sched.c
868 # define finish_arch_switch(prev) do { } while (0) argument
886 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) in finish_lock_switch() argument
929 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) in finish_lock_switch() argument
938 prev->oncpu = 0; in finish_lock_switch()
2563 prepare_task_switch(struct rq *rq, struct task_struct *prev, in prepare_task_switch() argument
2566 fire_sched_out_preempt_notifiers(prev, next); in prepare_task_switch()
2586 static void finish_task_switch(struct rq *rq, struct task_struct *prev) in finish_task_switch() argument
2605 prev_state = prev->state; in finish_task_switch()
2606 finish_arch_switch(prev); in finish_task_switch()
2607 finish_lock_switch(rq, prev); in finish_task_switch()
[all …]
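The sched.c hits are the prepare/finish bracket around a context switch: prepare_task_switch() fires the sched-out preempt notifiers before the low-level switch, and finish_task_switch() samples prev->state and clears prev's CPU claim afterwards. A compilable outline with stub types (none of these bodies are the real implementations):

struct task_struct_sketch { long state; int oncpu; };

static void finish_arch_switch_sketch(struct task_struct_sketch *prev) { }

static void finish_lock_switch_sketch(struct task_struct_sketch *prev)
{
	prev->oncpu = 0;	/* line 938: prev has fully left the CPU */
}

static void finish_task_switch_sketch(struct task_struct_sketch *prev)
{
	long prev_state = prev->state;	/* sample before prev can be freed */

	finish_arch_switch_sketch(prev);
	finish_lock_switch_sketch(prev);
	(void)prev_state;	/* the kernel uses this to reap dead tasks */
}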
workqueue.c
437 struct list_head *prev; in flush_work() local
448 prev = NULL; in flush_work()
458 prev = &work->entry; in flush_work()
462 prev = &cwq->worklist; in flush_work()
464 insert_wq_barrier(cwq, &barr, prev->next); in flush_work()
467 if (!prev) in flush_work()
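flush_work() waits by inserting a barrier work item into the queue, and the excerpt is the logic that picks where: right behind the work if it is still pending, at the head of the CPU's worklist if it is currently running, and nowhere (prev stays NULL) if it already finished. A stand-alone restatement of that selection, with stand-in types and flags:

struct list_head { struct list_head *next, *prev; };

/* Returns the node after which the barrier must be queued,
 * or NULL if there is nothing left to wait for. */
static struct list_head *barrier_point_sketch(struct list_head *work_entry,
					      struct list_head *worklist,
					      int work_pending, int work_running)
{
	struct list_head *prev = NULL;

	if (work_pending)
		prev = work_entry;	/* line 458: barrier right behind it */
	else if (work_running)
		prev = worklist;	/* line 462: head of the running queue */
	return prev;			/* line 467: !prev means nothing to flush */
}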
sched_fair.c
833 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) in put_prev_entity() argument
839 if (prev->on_rq) in put_prev_entity()
842 check_spread(cfs_rq, prev); in put_prev_entity()
843 if (prev->on_rq) { in put_prev_entity()
844 update_stats_wait_start(cfs_rq, prev); in put_prev_entity()
846 __enqueue_entity(cfs_rq, prev); in put_prev_entity()
1485 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) in put_prev_task_fair() argument
1487 struct sched_entity *se = &prev->se; in put_prev_task_fair()
mutex.h
17 __list_del((waiter)->list.prev, (waiter)->list.next)
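The mutex.h macro unlinks a waiter by handing its two neighbours to __list_del(), the core splice of the kernel's doubly linked list. Its body is essentially two stores (restated here with a local struct definition):

struct list_head { struct list_head *next, *prev; };

/* Point the neighbours at each other; the removed node's own
 * pointers are left untouched for the caller to reuse or poison. */
static void __list_del_sketch(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}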
sched_idletask.c
42 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) in put_prev_task_idle() argument
mutex-debug.c
84 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); in debug_mutex_unlock()
sched_rt.c
1260 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) in pre_schedule_rt() argument
1263 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio) in pre_schedule_rt()
1501 if (p->rt.run_list.prev != p->rt.run_list.next) { in task_tick_rt()
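The task_tick_rt() test at line 1501 looks cryptic but is an emptiness check: on a circular list, a node whose prev and next point at the same element is the only entry on its queue, so there is no peer to round-robin with. A small runnable demonstration (list helpers rewritten here, not taken from the kernel):

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

int main(void)
{
	struct list_head queue, a, b;

	list_init(&queue);
	list_add_tail(&a, &queue);
	assert(a.prev == a.next);	/* alone: both links hit the head */
	list_add_tail(&b, &queue);
	assert(a.prev != a.next);	/* a second entry breaks the tie */
	return 0;
}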
sysctl.c
1558 struct ctl_table_header *prev) in __sysctl_head_next() argument
1566 if (prev) { in __sysctl_head_next()
1567 head = prev; in __sysctl_head_next()
1568 tmp = &prev->ctl_entry; in __sysctl_head_next()
1569 unuse_table(prev); in __sysctl_head_next()
1601 struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev) in sysctl_head_next() argument
1603 return __sysctl_head_next(current->nsproxy, prev); in sysctl_head_next()
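__sysctl_head_next() uses prev as a resumable cursor: handing back the previously returned header continues the walk from there, after dropping the reference that was held for the caller, while NULL starts from the beginning. The shape of that API reduced to a sketch with stand-in types:

struct node { struct node *next; };

static void unuse_sketch(struct node *n) { /* drop the caller's reference */ }

static struct node *head_next_sketch(struct node *list, struct node *prev)
{
	if (prev) {
		struct node *next = prev->next;	/* resume after the last result */
		unuse_sketch(prev);		/* line 1569: release it */
		return next;
	}
	return list;			/* first call: start at the head */
}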
timer.c
432 timer->entry.prev == TIMER_ENTRY_STATIC) { in timer_fixup_activate()
554 __list_del(entry->prev, entry->next); in detach_timer()
557 entry->prev = LIST_POISON2; in detach_timer()
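detach_timer() unlinks the entry and then poisons the stale link so a later use-after-detach dereference faults at a recognizable address. A sketch of that pattern, using the classic poison constants from the kernel's poison.h (values shown without the 64-bit pointer delta):

#define LIST_POISON1 ((struct list_head *)0x00100100)
#define LIST_POISON2 ((struct list_head *)0x00200200)

struct list_head { struct list_head *next, *prev; };

static void detach_sketch(struct list_head *entry)
{
	/* unlink: neighbours bypass the entry (line 554) */
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
	/* poison: a dereference of the stale links now faults recognisably */
	entry->next = LIST_POISON1;	/* as plain list_del() does */
	entry->prev = LIST_POISON2;	/* line 557 */
}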
audit_tree.c
794 while (barrier.prev != &tree_list) { in audit_tag_tree()
797 tree = container_of(barrier.prev, struct audit_tree, list); in audit_tag_tree()
kexec.c
1409 VMCOREINFO_OFFSET(list_head, prev); in crash_save_vmcoreinfo_init()
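VMCOREINFO_OFFSET(list_head, prev) exports the byte offset of the prev member into the vmcoreinfo note, so post-mortem tools can walk kernel lists in a crash dump without the kernel's debug info. The underlying mechanism is just offsetof():

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

int main(void)
{
	/* what the vmcoreinfo note records for this member */
	printf("OFFSET(list_head.prev)=%zu\n",
	       offsetof(struct list_head, prev));
	return 0;
}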
/kernel/time/
timer_stats.c
169 struct entry **head, *curr, *prev; in tstat_lookup() local
188 prev = NULL; in tstat_lookup()
199 prev = curr; in tstat_lookup()
212 if (prev) in tstat_lookup()
213 prev->next = curr; in tstat_lookup()
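tstat_lookup() shows the trailing-prev idiom for singly linked hash chains: walk the chain remembering the last node, so a miss can append without a second pass. A runnable restatement (struct entry here is a stand-in, not the real one):

#include <stddef.h>

struct entry { int key; struct entry *next; };

static struct entry *lookup_or_append(struct entry **head,
				      struct entry *new_entry, int key)
{
	struct entry *curr = *head, *prev = NULL;

	while (curr) {
		if (curr->key == key)
			return curr;		/* found: no insertion */
		prev = curr;			/* line 199: remember the tail */
		curr = curr->next;
	}
	new_entry->key = key;
	new_entry->next = NULL;
	if (prev)
		prev->next = new_entry;		/* line 213: append after tail */
	else
		*head = new_entry;		/* first entry in the chain */
	return new_entry;
}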
/kernel/trace/
trace_sched_switch.c
23 probe_sched_switch(struct rq *__rq, struct task_struct *prev, in probe_sched_switch() argument
34 tracing_record_cmdline(prev); in probe_sched_switch()
46 tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc); in probe_sched_switch()
trace_sched_wakeup.c
108 probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, in probe_wakeup_sched_switch() argument
119 tracing_record_cmdline(prev); in probe_wakeup_sched_switch()
ring_buffer.c
314 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) in rb_check_pages()
316 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) in rb_check_pages()
321 bpage->list.next->prev != &bpage->list)) in rb_check_pages()
324 bpage->list.prev->next != &bpage->list)) in rb_check_pages()
1761 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
1767 reader->list.prev->next = &cpu_buffer->reader_page->list; in rb_get_reader_page()
1768 reader->list.next->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
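rb_check_pages() validates the ring buffer's circular page list by asserting the two invariants of a healthy doubly linked list: following next then prev (or prev then next) from any node must return to that node. Reduced to its core and made runnable:

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static int list_node_ok(const struct list_head *n)
{
	return n->next->prev == n &&	/* line 314 / 321 */
	       n->prev->next == n;	/* line 316 / 324 */
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty circular list */

	assert(list_node_ok(&head));
	return 0;
}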
trace.h
395 struct task_struct *prev,
451 struct task_struct *prev,
trace.c
1083 struct task_struct *prev, argument
1098 entry->prev_pid = prev->pid;
1099 entry->prev_prio = prev->prio;
1100 entry->prev_state = prev->state;
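The trace.c hits snapshot the outgoing task's pid, priority, and state into the ring-buffer entry at switch time, since prev may have exited by the time the event is read. The copy, with stand-in types:

struct task { int pid; int prio; long state; };
struct ctx_switch_entry_sketch { int prev_pid; int prev_prio; long prev_state; };

static void fill_switch_entry(struct ctx_switch_entry_sketch *entry,
			      const struct task *prev)
{
	entry->prev_pid   = prev->pid;	/* lines 1098-1100 */
	entry->prev_prio  = prev->prio;
	entry->prev_state = prev->state;
}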
/kernel/power/
snapshot.c
397 bb = list_entry(bm->blocks.prev, struct bm_block, hook); in memory_bm_create()
399 error = create_bm_block_list(pages, bm->blocks.prev, &ca); in memory_bm_create()
604 region = list_entry(nosave_regions.prev, in __register_nosave_region()
1355 if (handle->prev < handle->cur) { in snapshot_read_next()
1378 handle->prev = handle->cur; in snapshot_read_next()
1869 if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) in snapshot_write_next()
1883 if (handle->prev < handle->cur) { in snapshot_write_next()
1884 if (handle->prev == 0) { in snapshot_write_next()
1893 } else if (handle->prev <= nr_meta_pages) { in snapshot_write_next()
1898 if (handle->prev == nr_meta_pages) { in snapshot_write_next()
[all …]
power.h
109 unsigned int prev; /* number of the block of PAGE_SIZE bytes that member
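Across the snapshot.c hits, handle->cur is the block the caller should see next and handle->prev is the last one actually prepared; the prev < cur comparisons (lines 1355 and 1883) are how snapshot_read_next() and snapshot_write_next() notice that the caller consumed the previous block and a new one must be set up. A stand-in restatement of that handshake:

struct snapshot_handle_sketch { unsigned int cur, prev; };

/* True when the caller has consumed block "prev" and the handle
 * must advance to block "cur" (prev == 0 still means the header). */
static int need_next_block_sketch(const struct snapshot_handle_sketch *h)
{
	return h->prev < h->cur;
}

static void block_done_sketch(struct snapshot_handle_sketch *h)
{
	h->prev = h->cur;	/* line 1378: remember what was handed out */
}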