/kernel/sched/ |
D | wait.c |
    24  unsigned long flags;  in add_wait_queue() local
    26  wait->flags &= ~WQ_FLAG_EXCLUSIVE;  in add_wait_queue()
    27  spin_lock_irqsave(&q->lock, flags);  in add_wait_queue()
    29  spin_unlock_irqrestore(&q->lock, flags);  in add_wait_queue()
    35  unsigned long flags;  in add_wait_queue_exclusive() local
    37  wait->flags |= WQ_FLAG_EXCLUSIVE;  in add_wait_queue_exclusive()
    38  spin_lock_irqsave(&q->lock, flags);  in add_wait_queue_exclusive()
    40  spin_unlock_irqrestore(&q->lock, flags);  in add_wait_queue_exclusive()
    46  unsigned long flags;  in remove_wait_queue() local
    48  spin_lock_irqsave(&q->lock, flags);  in remove_wait_queue()
    [all …]
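Nearly every hit in this listing follows the same idiom: a local `unsigned long flags` that captures the interrupt state saved by spin_lock_irqsave() and handed back to spin_unlock_irqrestore(). A minimal sketch of that pattern from the caller's side; the demo_* names are invented for illustration:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct demo_dev {
            spinlock_t lock;                /* protects @events; spin_lock_init() at probe time */
            struct list_head events;
    };

    struct demo_event {
            struct list_head node;
    };

    /* Queue an event; safe from both process and interrupt context. */
    static void demo_push_event(struct demo_dev *dev, struct demo_event *ev)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);           /* disable IRQs, remember prior state */
            list_add_tail(&ev->node, &dev->events);
            spin_unlock_irqrestore(&dev->lock, flags);      /* restore exactly what was saved */
    }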
|
D | completion.c |
    31  unsigned long flags;  in complete() local
    33  spin_lock_irqsave(&x->wait.lock, flags);  in complete()
    36  spin_unlock_irqrestore(&x->wait.lock, flags);  in complete()
    51  unsigned long flags;  in complete_all() local
    53  spin_lock_irqsave(&x->wait.lock, flags);  in complete_all()
    56  spin_unlock_irqrestore(&x->wait.lock, flags);  in complete_all()
    267  unsigned long flags;  in try_wait_for_completion() local
    270  spin_lock_irqsave(&x->wait.lock, flags);  in try_wait_for_completion()
    275  spin_unlock_irqrestore(&x->wait.lock, flags);  in try_wait_for_completion()
    290  unsigned long flags;  in completion_done() local
    [all …]
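complete(), complete_all() and try_wait_for_completion() all wrap the same flags-saving lock acquisition on x->wait.lock; from the caller's point of view the API reduces to a sketch like this (the demo_* names are hypothetical):

    #include <linux/completion.h>

    static DECLARE_COMPLETION(demo_done);   /* hypothetical completion */

    /* Waiter side: sleep until the event is signalled (or a signal arrives). */
    static int demo_wait(void)
    {
            return wait_for_completion_interruptible(&demo_done);
    }

    /* Signaller side, e.g. from an interrupt handler: wake one waiter. */
    static void demo_signal(void)
    {
            complete(&demo_done);           /* complete_all() would release every waiter */
    }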
|
/kernel/irq/ |
D | manage.c |
    40  unsigned long flags;  in __synchronize_hardirq() local
    50  raw_spin_lock_irqsave(&desc->lock, flags);  in __synchronize_hardirq()
    52  raw_spin_unlock_irqrestore(&desc->lock, flags);  in __synchronize_hardirq()
    224  unsigned long flags;  in __irq_set_affinity() local
    230  raw_spin_lock_irqsave(&desc->lock, flags);  in __irq_set_affinity()
    232  raw_spin_unlock_irqrestore(&desc->lock, flags);  in __irq_set_affinity()
    238  unsigned long flags;  in irq_set_affinity_hint() local
    239  struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);  in irq_set_affinity_hint()
    244  irq_put_desc_unlock(desc, flags);  in irq_set_affinity_hint()
    255  unsigned long flags;  in irq_affinity_notify() local
    [all …]
|
D | pm.c |
    38  if (action->flags & IRQF_FORCE_RESUME)  in irq_pm_install_action()
    44  if (action->flags & IRQF_NO_SUSPEND)  in irq_pm_install_action()
    59  if (action->flags & IRQF_FORCE_RESUME)  in irq_pm_remove_action()
    62  if (action->flags & IRQF_NO_SUSPEND)  in irq_pm_remove_action()
    91  if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)  in suspend_device_irq()
    118  unsigned long flags;  in suspend_device_irqs() local
    121  raw_spin_lock_irqsave(&desc->lock, flags);  in suspend_device_irqs()
    123  raw_spin_unlock_irqrestore(&desc->lock, flags);  in suspend_device_irqs()
    155  unsigned long flags;  in resume_irqs() local
    157  desc->action->flags & IRQF_EARLY_RESUME;  in resume_irqs()
    [all …]
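The IRQF_NO_SUSPEND and IRQF_FORCE_RESUME bits tested in irq_pm_install_action() arrive via the flags argument of request_irq(). A hedged sketch of a driver keeping its interrupt line alive across suspend; the handler body and demo_* names are invented:

    #include <linux/interrupt.h>

    static irqreturn_t demo_irq_handler(int irq, void *dev_id)
    {
            /* acknowledge the (hypothetical) device here */
            return IRQ_HANDLED;
    }

    static int demo_setup_irq(unsigned int irq, void *dev)
    {
            /*
             * IRQF_NO_SUSPEND asks suspend_device_irqs() to leave this
             * line enabled while the rest of the system is suspended.
             */
            return request_irq(irq, demo_irq_handler, IRQF_NO_SUSPEND,
                               "demo-wake-irq", dev);
    }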
|
D | chip.c |
    30  unsigned long flags;  in irq_set_chip() local
    31  struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);  in irq_set_chip()
    40  irq_put_desc_unlock(desc, flags);  in irq_set_chip()
    57  unsigned long flags;  in irq_set_irq_type() local
    58  struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);  in irq_set_irq_type()
    66  irq_put_desc_busunlock(desc, flags);  in irq_set_irq_type()
    80  unsigned long flags;  in irq_set_handler_data() local
    81  struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);  in irq_set_handler_data()
    86  irq_put_desc_unlock(desc, flags);  in irq_set_handler_data()
    102  unsigned long flags;  in irq_set_msi_desc_off() local
    [all …]
|
/kernel/locking/ |
D | rwsem-spinlock.c |
    26  unsigned long flags;  in rwsem_is_locked() local
    28  if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {  in rwsem_is_locked()
    30  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in rwsem_is_locked()
    125  unsigned long flags;  in __down_read() local
    127  raw_spin_lock_irqsave(&sem->wait_lock, flags);  in __down_read()
    132  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in __down_read()
    147  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in __down_read()
    167  unsigned long flags;  in __down_read_trylock() local
    171  raw_spin_lock_irqsave(&sem->wait_lock, flags);  in __down_read_trylock()
    179  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in __down_read_trylock()
    [all …]
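__down_read() and friends are the slow paths behind the rw-semaphore API on uniprocessor builds; the caller-visible surface is just down_read()/up_read() and down_write()/up_write(), as in this sketch (demo_* names are hypothetical):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(demo_rwsem);       /* hypothetical lock protecting demo_value */
    static int demo_value;

    static int demo_get(void)
    {
            int v;

            down_read(&demo_rwsem);         /* many readers may hold this concurrently */
            v = demo_value;
            up_read(&demo_rwsem);
            return v;
    }

    static void demo_set(int v)
    {
            down_write(&demo_rwsem);        /* writers are exclusive */
            demo_value = v;
            up_write(&demo_rwsem);
    }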
|
D | semaphore.c |
    55  unsigned long flags;  in down() local
    57  raw_spin_lock_irqsave(&sem->lock, flags);  in down()
    62  raw_spin_unlock_irqrestore(&sem->lock, flags);  in down()
    77  unsigned long flags;  in down_interruptible() local
    80  raw_spin_lock_irqsave(&sem->lock, flags);  in down_interruptible()
    85  raw_spin_unlock_irqrestore(&sem->lock, flags);  in down_interruptible()
    103  unsigned long flags;  in down_killable() local
    106  raw_spin_lock_irqsave(&sem->lock, flags);  in down_killable()
    111  raw_spin_unlock_irqrestore(&sem->lock, flags);  in down_killable()
    132  unsigned long flags;  in down_trylock() local
    [all …]
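down(), down_interruptible() and down_killable() all guard sem->lock with the same irqsave pattern. A typical caller-side sketch, with hypothetical demo_* names and an arbitrary count of four:

    #include <linux/semaphore.h>
    #include <linux/errno.h>

    static struct semaphore demo_sem;       /* counts free slots; hypothetical */

    static void demo_init(void)
    {
            sema_init(&demo_sem, 4);        /* allow up to four concurrent holders */
    }

    static int demo_do_work(void)
    {
            if (down_interruptible(&demo_sem))      /* sleeps; fails if a signal arrives */
                    return -EINTR;

            /* ... section limited to four concurrent callers ... */

            up(&demo_sem);                  /* release the slot, possibly waking a sleeper */
            return 0;
    }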
|
D | spinlock.c |
    79  unsigned long flags; \
    83  local_irq_save(flags); \
    86  local_irq_restore(flags); \
    95  return flags; \
    105  unsigned long flags; \
    112  flags = _raw_##op##_lock_irqsave(lock); \
    114  local_irq_restore(flags); \
    189  void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)  in _raw_spin_unlock_irqrestore() argument
    191  __raw_spin_unlock_irqrestore(lock, flags);  in _raw_spin_unlock_irqrestore()
    261  void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)  in _raw_read_unlock_irqrestore() argument
    [all …]
|
D | rtmutex.c |
    305  unsigned long flags;  in rt_mutex_adjust_prio() local
    307  raw_spin_lock_irqsave(&task->pi_lock, flags);  in rt_mutex_adjust_prio()
    309  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  in rt_mutex_adjust_prio()
    423  unsigned long flags;  in rt_mutex_adjust_prio_chain() local
    466  raw_spin_lock_irqsave(&task->pi_lock, flags);  in rt_mutex_adjust_prio_chain()
    550  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  in rt_mutex_adjust_prio_chain()
    581  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  in rt_mutex_adjust_prio_chain()
    596  raw_spin_lock_irqsave(&task->pi_lock, flags);  in rt_mutex_adjust_prio_chain()
    611  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  in rt_mutex_adjust_prio_chain()
    633  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  in rt_mutex_adjust_prio_chain()
    [all …]
|
/kernel/ |
D | freezer.c |
    42  if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))  in freezing_slow_path()
    51  if (pm_freezing && !(p->flags & PF_KTHREAD))  in freezing_slow_path()
    72  current->flags |= PF_FROZEN;  in __refrigerator()
    75  current->flags &= ~PF_FROZEN;  in __refrigerator()
    78  if (!(current->flags & PF_FROZEN))  in __refrigerator()
    99  unsigned long flags;  in fake_signal_wake_up() local
    101  if (lock_task_sighand(p, &flags)) {  in fake_signal_wake_up()
    103  unlock_task_sighand(p, &flags);  in fake_signal_wake_up()
    120  unsigned long flags;  in freeze_task() local
    134  spin_lock_irqsave(&freezer_lock, flags);  in freeze_task()
    [all …]
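__refrigerator() parks a task by setting PF_FROZEN in its task flags, and tasks only reach it by calling try_to_freeze(). A sketch of a freezer-aware kernel thread, with invented demo_* names:

    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/delay.h>

    static int demo_thread_fn(void *data)
    {
            set_freezable();                /* clear PF_NOFREEZE so suspend may freeze us */

            while (!kthread_should_stop()) {
                    try_to_freeze();        /* enters __refrigerator() while freezing() is true */
                    /* ... one round of periodic work ... */
                    msleep_interruptible(1000);
            }
            return 0;
    }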
|
D | up.c |
    13  unsigned long flags;  in smp_call_function_single() local
    17  local_irq_save(flags);  in smp_call_function_single()
    19  local_irq_restore(flags);  in smp_call_function_single()
    27  unsigned long flags;  in smp_call_function_single_async() local
    29  local_irq_save(flags);  in smp_call_function_single_async()
    31  local_irq_restore(flags);  in smp_call_function_single_async()
    38  unsigned long flags;  in on_each_cpu() local
    40  local_irq_save(flags);  in on_each_cpu()
    42  local_irq_restore(flags);  in on_each_cpu()
    56  unsigned long flags;  in on_each_cpu_mask() local
    [all …]
|
D | irq_work.c |
    31  unsigned long flags, oflags, nflags;  in irq_work_claim() local
    37  flags = work->flags & ~IRQ_WORK_PENDING;  in irq_work_claim()
    39  nflags = flags | IRQ_WORK_FLAGS;  in irq_work_claim()
    40  oflags = cmpxchg(&work->flags, flags, nflags);  in irq_work_claim()
    41  if (oflags == flags)  in irq_work_claim()
    45  flags = oflags;  in irq_work_claim()
    97  if (work->flags & IRQ_WORK_LAZY) {  in irq_work_queue()
    131  unsigned long flags;  in irq_work_run_list() local
    153  flags = work->flags & ~IRQ_WORK_PENDING;  in irq_work_run_list()
    154  xchg(&work->flags, flags);  in irq_work_run_list()
    [all …]
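irq_work_claim() sets the PENDING bit with a lock-free cmpxchg() retry loop rather than a spinlock. The same claim pattern, sketched over a hypothetical work item (the DEMO_* flags and demo_* names are made up):

    #include <linux/atomic.h>
    #include <linux/types.h>

    #define DEMO_PENDING    0x1UL
    #define DEMO_BUSY       0x2UL

    struct demo_work {
            unsigned long flags;
    };

    /*
     * Claim a work item exactly once: succeed only if we are the caller
     * that flips PENDING from clear to set.
     */
    static bool demo_work_claim(struct demo_work *work)
    {
            unsigned long flags, oflags, nflags;

            flags = work->flags & ~DEMO_PENDING;
            for (;;) {
                    nflags = flags | DEMO_PENDING | DEMO_BUSY;
                    oflags = cmpxchg(&work->flags, flags, nflags);
                    if (oflags == flags)
                            return true;            /* we set PENDING */
                    if (oflags & DEMO_PENDING)
                            return false;           /* someone else already claimed it */
                    flags = oflags;                 /* lost a race on other bits; retry */
            }
    }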
|
D | kthread.c |
    42  unsigned long flags;  member
    81  return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);  in kthread_should_stop()
    98  return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);  in kthread_should_park()
    160  while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {  in __kthread_parkme()
    161  if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))  in __kthread_parkme()
    166  clear_bit(KTHREAD_IS_PARKED, &self->flags);  in __kthread_parkme()
    185  self.flags = 0;  in kthread()
    205  if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {  in kthread()
    330  unsigned long flags;  in __kthread_bind_mask() local
    338  raw_spin_lock_irqsave(&p->pi_lock, flags);  in __kthread_bind_mask()
    [all …]
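kthread_should_stop() simply tests the KTHREAD_SHOULD_STOP bit in the per-thread flags word, which kthread_stop() sets before waking the thread. A typical consumer, with invented demo_* names:

    #include <linux/kthread.h>
    #include <linux/err.h>
    #include <linux/delay.h>

    static struct task_struct *demo_task;

    static int demo_kthread_fn(void *data)
    {
            while (!kthread_should_stop()) {        /* reads KTHREAD_SHOULD_STOP */
                    /* ... one unit of work ... */
                    msleep_interruptible(500);
            }
            return 0;
    }

    static int demo_start(void)
    {
            demo_task = kthread_run(demo_kthread_fn, NULL, "demo-worker");
            return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
    }

    static void demo_stop(void)
    {
            kthread_stop(demo_task);        /* sets the stop bit and waits for the thread to exit */
    }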
|
/kernel/rcu/ |
D | tiny.c |
    86  unsigned long flags;  in rcu_idle_enter() local
    89  local_irq_save(flags);  in rcu_idle_enter()
    97  local_irq_restore(flags);  in rcu_idle_enter()
    106  unsigned long flags;  in rcu_irq_exit() local
    109  local_irq_save(flags);  in rcu_irq_exit()
    113  local_irq_restore(flags);  in rcu_irq_exit()
    143  unsigned long flags;  in rcu_idle_exit() local
    146  local_irq_save(flags);  in rcu_idle_exit()
    154  local_irq_restore(flags);  in rcu_idle_exit()
    163  unsigned long flags;  in rcu_irq_enter() local
    [all …]
|
D | tree.c |
    244  unsigned long flags;  in rcu_momentary_dyntick_idle() local
    250  local_irq_save(flags);  in rcu_momentary_dyntick_idle()
    281  local_irq_restore(flags);  in rcu_momentary_dyntick_idle()
    398  void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,  in rcutorture_get_gp_data() argument
    417  *flags = ACCESS_ONCE(rsp->gp_flags);  in rcutorture_get_gp_data()
    422  *flags = 0;  in rcutorture_get_gp_data()
    588  unsigned long flags;  in rcu_idle_enter() local
    590  local_irq_save(flags);  in rcu_idle_enter()
    593  local_irq_restore(flags);  in rcu_idle_enter()
    630  unsigned long flags;  in rcu_irq_exit() local
    [all …]
|
/kernel/trace/ |
D | trace_functions_graph.c |
    116  u32 flags);
    283  unsigned long flags,  in __trace_graph_entry() argument
    295  sizeof(*entry), flags, pc);  in __trace_graph_entry()
    318  unsigned long flags;  in trace_graph_entry() local
    343  local_irq_save(flags);  in trace_graph_entry()
    349  ret = __trace_graph_entry(tr, trace, flags, pc);  in trace_graph_entry()
    355  local_irq_restore(flags);  in trace_graph_entry()
    370  unsigned long ip, unsigned long flags, int pc)  in __trace_graph_function() argument
    384  __trace_graph_entry(tr, &ent, flags, pc);  in __trace_graph_function()
    385  __trace_graph_return(tr, &ret, flags, pc);  in __trace_graph_function()
    [all …]
|
D | trace_irqsoff.c |
    105  unsigned long *flags)  in func_prolog_dec() argument
    120  local_save_flags(*flags);  in func_prolog_dec()
    122  if (!irqs_disabled_flags(*flags))  in func_prolog_dec()
    145  unsigned long flags;  in irqsoff_tracer_call() local
    147  if (!func_prolog_dec(tr, &data, &flags))  in irqsoff_tracer_call()
    150  trace_function(tr, ip, parent_ip, flags, preempt_count());  in irqsoff_tracer_call()
    183  unsigned long flags;  in irqsoff_graph_entry() local
    187  if (!func_prolog_dec(tr, &data, &flags))  in irqsoff_graph_entry()
    191  ret = __trace_graph_entry(tr, trace, flags, pc);  in irqsoff_graph_entry()
    201  unsigned long flags;  in irqsoff_graph_return() local
    [all …]
|
D | trace_sched_wakeup.c |
    120  unsigned long flags;  in wakeup_tracer_call() local
    126  local_irq_save(flags);  in wakeup_tracer_call()
    127  trace_function(tr, ip, parent_ip, flags, pc);  in wakeup_tracer_call()
    128  local_irq_restore(flags);  in wakeup_tracer_call()
    230  unsigned long flags;  in wakeup_graph_entry() local
    236  local_save_flags(flags);  in wakeup_graph_entry()
    237  ret = __trace_graph_entry(tr, trace, flags, pc);  in wakeup_graph_entry()
    248  unsigned long flags;  in wakeup_graph_return() local
    254  local_save_flags(flags);  in wakeup_graph_return()
    255  __trace_graph_return(tr, trace, flags, pc);  in wakeup_graph_return()
    [all …]
|
D | trace_sched_switch.c |
    28  unsigned long flags, int pc)  in tracing_sched_switch_trace() argument
    36  sizeof(*entry), flags, pc);  in tracing_sched_switch_trace()
    49  trace_buffer_unlock_commit(buffer, event, flags, pc);  in tracing_sched_switch_trace()
    56  unsigned long flags;  in probe_sched_switch() local
    70  local_irq_save(flags);  in probe_sched_switch()
    75  tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);  in probe_sched_switch()
    77  local_irq_restore(flags);  in probe_sched_switch()
    84  unsigned long flags, int pc)  in tracing_sched_wakeup_trace() argument
    92  sizeof(*entry), flags, pc);  in tracing_sched_wakeup_trace()
    105  trace_buffer_unlock_commit(buffer, event, flags, pc);  in tracing_sched_wakeup_trace()
    [all …]
|
D | trace_output.c |
    73  unsigned long flags,  in ftrace_print_flags_seq() argument
    81  for (i = 0; flag_array[i].name && flags; i++) {  in ftrace_print_flags_seq()
    84  if ((flags & mask) != mask)  in ftrace_print_flags_seq()
    88  flags &= ~mask;  in ftrace_print_flags_seq()
    97  if (flags) {  in ftrace_print_flags_seq()
    100  trace_seq_printf(p, "0x%lx", flags);  in ftrace_print_flags_seq()
    418  hardirq = entry->flags & TRACE_FLAG_HARDIRQ;  in trace_print_lat_fmt()
    419  softirq = entry->flags & TRACE_FLAG_SOFTIRQ;  in trace_print_lat_fmt()
    422  (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :  in trace_print_lat_fmt()
    423  (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :  in trace_print_lat_fmt()
    [all …]
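ftrace_print_flags_seq() walks a { mask, name } table, clearing each bit it manages to decode and printing whatever is left as a raw hex value. A self-contained sketch of that decode loop with hypothetical demo_* names:

    #include <linux/kernel.h>

    static const struct {
            unsigned long mask;
            const char *name;
    } demo_flag_names[] = {
            { 0x1, "PENDING" },
            { 0x2, "BUSY" },
            { 0x4, "LAZY" },
            { 0,   NULL },
    };

    static void demo_print_flags(char *buf, size_t len, unsigned long flags)
    {
            size_t pos = 0;
            int i;

            buf[0] = '\0';
            for (i = 0; demo_flag_names[i].name && flags; i++) {
                    unsigned long mask = demo_flag_names[i].mask;

                    if ((flags & mask) != mask)
                            continue;
                    flags &= ~mask;         /* consume the bits we just decoded */
                    pos += scnprintf(buf + pos, len - pos, "%s%s",
                                     pos ? "|" : "", demo_flag_names[i].name);
            }
            if (flags)                      /* anything left over is unknown */
                    scnprintf(buf + pos, len - pos, "%s0x%lx",
                              pos ? "|" : "", flags);
    }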
|
/kernel/time/ |
D | alarmtimer.c |
    65  unsigned long flags;  in alarmtimer_get_rtcdev() local
    68  spin_lock_irqsave(&rtcdev_lock, flags);  in alarmtimer_get_rtcdev()
    70  spin_unlock_irqrestore(&rtcdev_lock, flags);  in alarmtimer_get_rtcdev()
    79  unsigned long flags;  in alarmtimer_rtc_add_device() local
    90  spin_lock_irqsave(&rtcdev_lock, flags);  in alarmtimer_rtc_add_device()
    96  spin_unlock_irqrestore(&rtcdev_lock, flags);  in alarmtimer_rtc_add_device()
    179  unsigned long flags;  in alarmtimer_fired() local
    183  spin_lock_irqsave(&base->lock, flags);  in alarmtimer_fired()
    185  spin_unlock_irqrestore(&base->lock, flags);  in alarmtimer_fired()
    190  spin_lock_irqsave(&base->lock, flags);  in alarmtimer_fired()
    [all …]
|
D | clocksource.c |
    139  cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);  in __clocksource_unstable()
    140  cs->flags |= CLOCK_SOURCE_UNSTABLE;  in __clocksource_unstable()
    163  unsigned long flags;  in clocksource_mark_unstable() local
    165  spin_lock_irqsave(&watchdog_lock, flags);  in clocksource_mark_unstable()
    166  if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {  in clocksource_mark_unstable()
    171  spin_unlock_irqrestore(&watchdog_lock, flags);  in clocksource_mark_unstable()
    190  if (cs->flags & CLOCK_SOURCE_UNSTABLE) {  in clocksource_watchdog()
    202  if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||  in clocksource_watchdog()
    204  cs->flags |= CLOCK_SOURCE_WATCHDOG;  in clocksource_watchdog()
    228  if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&  in clocksource_watchdog()
    [all …]
|
D | posix-timers.c |
    132  static int common_nsleep(const clockid_t, int flags, struct timespec *t,
    142  static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
    144  #define lock_timer(tid, flags) \  argument
    146  __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
    200  static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)  in unlock_timer() argument
    202  spin_unlock_irqrestore(&timr->it_lock, flags);  in unlock_timer()
    382  unsigned long flags;  in do_schedule_next_timer() local
    384  timr = lock_timer(info->si_tid, &flags);  in do_schedule_next_timer()
    396  unlock_timer(timr, flags);  in do_schedule_next_timer()
    438  unsigned long flags;  in posix_timer_fn() local
    [all …]
|
D | tick-broadcast.c |
    159  unsigned long flags;  in tick_device_uses_broadcast() local
    162  raw_spin_lock_irqsave(&tick_broadcast_lock, flags);  in tick_device_uses_broadcast()
    232  raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);  in tick_device_uses_broadcast()
    335  unsigned long flags;  in tick_do_broadcast_on_off() local
    338  raw_spin_lock_irqsave(&tick_broadcast_lock, flags);  in tick_do_broadcast_on_off()
    392  raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);  in tick_do_broadcast_on_off()
    425  unsigned long flags;  in tick_shutdown_broadcast() local
    428  raw_spin_lock_irqsave(&tick_broadcast_lock, flags);  in tick_shutdown_broadcast()
    439  raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);  in tick_shutdown_broadcast()
    445  unsigned long flags;  in tick_suspend_broadcast() local
    [all …]
|
/kernel/printk/ |
D | printk.c |
    228  u8 flags:5; /* internal record flags */  member
    416  enum log_flags flags, u64 ts_nsec,  in log_store() argument
    458  msg->flags = flags & 0x1f;  in log_store()
    630  if (msg->flags & LOG_CONT && !(user->prev & LOG_CONT))  in devkmsg_read()
    632  else if ((msg->flags & LOG_CONT) ||  in devkmsg_read()
    633  ((user->prev & LOG_CONT) && !(msg->flags & LOG_PREFIX)))  in devkmsg_read()
    639  user->prev = msg->flags;  in devkmsg_read()
    894  unsigned long flags;  in setup_log_buf() local
    921  raw_spin_lock_irqsave(&logbuf_lock, flags);  in setup_log_buf()
    927  raw_spin_unlock_irqrestore(&logbuf_lock, flags);  in setup_log_buf()
    [all …]
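log_store() masks the incoming flags with 0x1f because the record keeps them in a five-bit bitfield (the u8 flags:5 member above). A sketch of that packing scheme, with hypothetical demo_* names standing in for the real log record:

    #include <linux/types.h>

    enum demo_log_flags {
            DEMO_LOG_NEWLINE = 1,           /* record ends with '\n' */
            DEMO_LOG_PREFIX  = 2,           /* record starts a new message */
            DEMO_LOG_CONT    = 4,           /* continuation of the previous record */
    };

    struct demo_record {
            u64 ts_nsec;                    /* timestamp in nanoseconds */
            u16 len;                        /* length of the whole record */
            u8  flags:5;                    /* only five flag bits are stored */
            u8  level:3;                    /* syslog level shares the byte */
    };

    static void demo_store(struct demo_record *rec, unsigned int flags, int level)
    {
            rec->flags = flags & 0x1f;      /* mask to the width of the bitfield */
            rec->level = level & 0x7;
    }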
|