/kernel/sched/

wait.c
    20   unsigned long flags;    in add_wait_queue() local
    22   wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;    in add_wait_queue()
    23   spin_lock_irqsave(&wq_head->lock, flags);    in add_wait_queue()
    25   spin_unlock_irqrestore(&wq_head->lock, flags);    in add_wait_queue()
    31   unsigned long flags;    in add_wait_queue_exclusive() local
    33   wq_entry->flags |= WQ_FLAG_EXCLUSIVE;    in add_wait_queue_exclusive()
    34   spin_lock_irqsave(&wq_head->lock, flags);    in add_wait_queue_exclusive()
    36   spin_unlock_irqrestore(&wq_head->lock, flags);    in add_wait_queue_exclusive()
    42   unsigned long flags;    in add_wait_queue_priority() local
    44   wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;    in add_wait_queue_priority()
    [all …]
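
This first entry already shows the two recurring roles of `flags` throughout this listing: a per-object bit field (`wq_entry->flags` with WQ_FLAG_EXCLUSIVE) and a caller-stack word that spin_lock_irqsave() fills with the saved interrupt state. A minimal sketch of the save/restore idiom, assuming a hypothetical `my_lock`/`my_shared_count` pair rather than the real wait-queue code:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);      /* hypothetical lock */
    static int my_shared_count;           /* hypothetical data it guards */

    static void my_increment(void)
    {
        unsigned long flags;

        /* Disable local IRQs and remember whether they were already off. */
        spin_lock_irqsave(&my_lock, flags);
        my_shared_count++;                /* safe against IRQ-context lockers */
        /* Restore exactly the interrupt state saved above. */
        spin_unlock_irqrestore(&my_lock, flags);
    }

The same triple (declare `unsigned long flags`, lock-irqsave, unlock-irqrestore) accounts for most of the hits in the files below.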

isolation.c
    28   unsigned long flags;    member
    35   return !!(housekeeping.flags & BIT(type));    in housekeeping_enabled()
    44   if (housekeeping.flags & BIT(type)) {    in housekeeping_any_cpu()
    59   if (housekeeping.flags & BIT(type))    in housekeeping_cpumask()
    68   if (housekeeping.flags & BIT(type))    in housekeeping_affine()
    76   if (housekeeping.flags & BIT(type))    in housekeeping_test_cpu()
    86   if (!housekeeping.flags)    in housekeeping_init()
    91   if (housekeeping.flags & HK_FLAG_TICK)    in housekeeping_init()
    94   for_each_set_bit(type, &housekeeping.flags, HK_TYPE_MAX) {    in housekeeping_init()
    109  static int __init housekeeping_setup(char *str, unsigned long flags)    in housekeeping_setup() argument
    [all …]
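
Here `flags` is instead a bitmask indexed by an enum: each housekeeping type owns BIT(type), and for_each_set_bit() walks the set bits. A freestanding sketch of that pattern, with hypothetical MY_HK_* names standing in for the real HK_TYPE_* enum:

    #include <linux/bits.h>
    #include <linux/bitops.h>
    #include <linux/printk.h>

    enum my_hk_type { MY_HK_TIMER, MY_HK_RCU, MY_HK_MISC, MY_HK_TYPE_MAX };

    static unsigned long my_hk_flags = BIT(MY_HK_TIMER) | BIT(MY_HK_RCU);

    static bool my_hk_enabled(enum my_hk_type type)
    {
        return !!(my_hk_flags & BIT(type));   /* !! normalizes to 0/1 */
    }

    static void my_hk_walk(void)
    {
        unsigned int type;

        for_each_set_bit(type, &my_hk_flags, MY_HK_TYPE_MAX)
            pr_info("housekeeping type %u enabled\n", type);
    }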

/kernel/

nsproxy.c
    67   static struct nsproxy *create_new_namespaces(unsigned long flags,    in create_new_namespaces() argument
    78   new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, user_ns, new_fs);    in create_new_namespaces()
    84   new_nsp->uts_ns = copy_utsname(flags, user_ns, tsk->nsproxy->uts_ns);    in create_new_namespaces()
    90   new_nsp->ipc_ns = copy_ipcs(flags, user_ns, tsk->nsproxy->ipc_ns);    in create_new_namespaces()
    97   copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);    in create_new_namespaces()
    103  new_nsp->cgroup_ns = copy_cgroup_ns(flags, user_ns,    in create_new_namespaces()
    110  new_nsp->net_ns = copy_net_ns(flags, user_ns, tsk->nsproxy->net_ns);    in create_new_namespaces()
    116  new_nsp->time_ns_for_children = copy_time_ns(flags, user_ns,    in create_new_namespaces()
    151  int copy_namespaces(unsigned long flags, struct task_struct *tsk)    in copy_namespaces() argument
    157  if (likely(!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |    in copy_namespaces()
    [all …]

kexec.c
    25   unsigned long flags)    in kimage_alloc_init() argument
    29   bool kexec_on_panic = flags & KEXEC_ON_CRASH;    in kimage_alloc_init()
    88   struct kexec_segment *segments, unsigned long flags)    in do_kexec_load() argument
    102  if (flags & KEXEC_ON_CRASH) {    in do_kexec_load()
    116  if (flags & KEXEC_ON_CRASH) {    in do_kexec_load()
    125  ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);    in do_kexec_load()
    129  if (flags & KEXEC_PRESERVE_CONTEXT)    in do_kexec_load()
    160  if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)    in do_kexec_load()
    191  unsigned long flags)    in kexec_load_check() argument
    216  if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))    in kexec_load_check()
    [all …]

kthread.c
    53   unsigned long flags;    member
    75   WARN_ON(!(k->flags & PF_KTHREAD));    in to_kthread()
    93   if (kthread && !(p->flags & PF_KTHREAD))    in __to_kthread()
    157  return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);    in kthread_should_stop()
    163  return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);    in __kthread_should_park()
    191  return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));    in kthread_should_stop_or_park()
    280  if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))    in __kthread_parkme()
    383  if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {    in kthread()
    531  unsigned long flags;    in __kthread_bind_mask() local
    539  raw_spin_lock_irqsave(&p->pi_lock, flags);    in __kthread_bind_mask()
    [all …]
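
The KTHREAD_SHOULD_STOP bit that kthread_should_stop() reads with test_bit() above is set by kthread_stop() from another task. A minimal consumer-side sketch, with a hypothetical thread function:

    #include <linux/kthread.h>
    #include <linux/delay.h>

    static int my_thread_fn(void *data)
    {
        /* kthread_should_stop() is the test_bit() shown on line 157 */
        while (!kthread_should_stop()) {
            /* do periodic work */
            msleep(100);
        }
        return 0;   /* this value is returned by kthread_stop() */
    }

    /* Usage: struct task_struct *t = kthread_run(my_thread_fn, NULL, "my-kt");
     *        ... later: kthread_stop(t);  -- sets the bit and wakes the thread */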

resource.c
    37   .flags = IORESOURCE_IO,
    45   .flags = IORESOURCE_MEM,
    165  static struct resource *alloc_resource(gfp_t flags)    in alloc_resource() argument
    167  return kzalloc(sizeof(struct resource), flags);    in alloc_resource()
    326  unsigned long flags, unsigned long desc,    in find_next_iomem_res() argument
    350  if ((p->flags & flags) != flags)    in find_next_iomem_res()
    364  .flags = p->flags,    in find_next_iomem_res()
    375  unsigned long flags, unsigned long desc,    in __walk_iomem_res_desc() argument
    383  !find_next_iomem_res(start, end, flags, desc, &res)) {    in __walk_iomem_res_desc()
    411  int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,    in walk_iomem_res_desc() argument
    [all …]
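
Line 350 is worth pausing on: `(p->flags & flags) != flags` is a subset test, requiring every requested bit to be set, not just any one of them. A small sketch making the distinction explicit, with a hypothetical helper name:

    #include <linux/ioport.h>

    static bool my_res_matches(const struct resource *p, unsigned long want)
    {
        /* (p->flags & want) == want  <=>  want is a subset of p->flags */
        return (p->flags & want) == want;
    }

    /* e.g. want = IORESOURCE_MEM | IORESOURCE_BUSY requires both bits;
     * a plain (p->flags & want) test would wrongly accept either one. */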

freezer.c
    43   if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))    in freezing_slow_path()
    52   if (pm_freezing && !(p->flags & PF_KTHREAD))    in freezing_slow_path()
    104  unsigned long flags;    in fake_signal_wake_up() local
    106  if (lock_task_sighand(p, &flags)) {    in fake_signal_wake_up()
    108  unlock_task_sighand(p, &flags);    in fake_signal_wake_up()
    164  unsigned long flags;    in freeze_task() local
    166  spin_lock_irqsave(&freezer_lock, flags);    in freeze_task()
    168  spin_unlock_irqrestore(&freezer_lock, flags);    in freeze_task()
    172  if (!(p->flags & PF_KTHREAD))    in freeze_task()
    177  spin_unlock_irqrestore(&freezer_lock, flags);    in freeze_task()
    [all …]

iomem.c
    24   unsigned long flags)    in arch_memremap_can_ram_remap() argument
    31   unsigned long flags)    in try_ram_remap() argument
    37   arch_memremap_can_ram_remap(offset, size, flags))    in try_ram_remap()
    71   void *memremap(resource_size_t offset, size_t size, unsigned long flags)    in memremap() argument
    77   if (!flags)    in memremap()
    87   if (flags & MEMREMAP_WB) {    in memremap()
    95   addr = try_ram_remap(offset, size, flags);    in memremap()
    106  if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {    in memremap()
    112  if (!addr && (flags & MEMREMAP_WT))    in memremap()
    115  if (!addr && (flags & MEMREMAP_WC))    in memremap()
    [all …]
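
The MEMREMAP_* bits behave as an ordered preference list inside memremap(): write-back is tried first, then write-through, then write-combine, among the types the caller allowed (lines 87, 112, 115). A usage sketch, assuming a hypothetical wrapper and caller-supplied offset/size:

    #include <linux/io.h>

    static void *my_map_region(resource_size_t offset, size_t size)
    {
        /* accept write-back, else fall back to write-through */
        void *addr = memremap(offset, size, MEMREMAP_WB | MEMREMAP_WT);

        if (!addr)
            return NULL;   /* no acceptable mapping type worked */
        return addr;       /* release later with memunmap(addr) */
    }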

/kernel/irq/

manage.c
    44   unsigned long flags;    in __synchronize_hardirq() local
    54   raw_spin_lock_irqsave(&desc->lock, flags);    in __synchronize_hardirq()
    70   raw_spin_unlock_irqrestore(&desc->lock, flags);    in __synchronize_hardirq()
    400  unsigned long flags;    in irq_update_affinity_desc() local
    411  desc = irq_get_desc_buslock(irq, &flags, 0);    in irq_update_affinity_desc()
    447  irq_put_desc_busunlock(desc, flags);    in irq_update_affinity_desc()
    455  unsigned long flags;    in __irq_set_affinity() local
    461  raw_spin_lock_irqsave(&desc->lock, flags);    in __irq_set_affinity()
    463  raw_spin_unlock_irqrestore(&desc->lock, flags);    in __irq_set_affinity()
    500  unsigned long flags;    in __irq_apply_affinity_hint() local
    [all …]

pm.c
    37   if (action->flags & IRQF_FORCE_RESUME)    in irq_pm_install_action()
    43   if (action->flags & IRQF_NO_SUSPEND)    in irq_pm_install_action()
    45   else if (action->flags & IRQF_COND_SUSPEND)    in irq_pm_install_action()
    61   if (action->flags & IRQF_FORCE_RESUME)    in irq_pm_remove_action()
    64   if (action->flags & IRQF_NO_SUSPEND)    in irq_pm_remove_action()
    66   else if (action->flags & IRQF_COND_SUSPEND)    in irq_pm_remove_action()
    72   unsigned long chipflags = irq_desc_get_chip(desc)->flags;    in suspend_device_irq()
    137  unsigned long flags;    in suspend_device_irqs() local
    142  raw_spin_lock_irqsave(&desc->lock, flags);    in suspend_device_irqs()
    144  raw_spin_unlock_irqrestore(&desc->lock, flags);    in suspend_device_irqs()
    [all …]
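
The IRQF_NO_SUSPEND / IRQF_COND_SUSPEND / IRQF_FORCE_RESUME bits dispatched above originate with drivers, which pass them to request_irq(). A sketch of the producer side, with a hypothetical handler and device:

    #include <linux/interrupt.h>

    static irqreturn_t my_handler(int irq, void *dev_id)
    {
        return IRQ_HANDLED;
    }

    static int my_probe_irq(unsigned int irq, void *dev)
    {
        /* IRQF_NO_SUSPEND: keep this IRQ enabled across suspend_device_irqs() */
        return request_irq(irq, my_handler, IRQF_NO_SUSPEND, "my-dev", dev);
        /* pair with free_irq(irq, dev) on teardown */
    }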

/kernel/trace/

trace_functions_graph.c
    95   struct trace_seq *s, u32 flags);
    130  unsigned long flags;    in trace_graph_entry() local
    171  local_irq_save(flags);    in trace_graph_entry()
    176  trace_ctx = tracing_gen_ctx_flags(flags);    in trace_graph_entry()
    183  local_irq_restore(flags);    in trace_graph_entry()
    239  unsigned long flags;    in trace_graph_return() local
    251  local_irq_save(flags);    in trace_graph_return()
    256  trace_ctx = tracing_gen_ctx_flags(flags);    in trace_graph_return()
    260  local_irq_restore(flags);    in trace_graph_return()
    512  enum trace_type type, int cpu, pid_t pid, u32 flags)    in print_graph_irq() argument
    [all …]

trace_output.c
    67   unsigned long flags,    in trace_print_flags_seq() argument
    75   for (i = 0; flag_array[i].name && flags; i++) {    in trace_print_flags_seq()
    78   if ((flags & mask) != mask)    in trace_print_flags_seq()
    82   flags &= ~mask;    in trace_print_flags_seq()
    91   if (flags) {    in trace_print_flags_seq()
    94   trace_seq_printf(p, "0x%lx", flags);    in trace_print_flags_seq()
    131  unsigned long long flags,    in trace_print_flags_seq_u64() argument
    139  for (i = 0; flag_array[i].name && flags; i++) {    in trace_print_flags_seq_u64()
    142  if ((flags & mask) != mask)    in trace_print_flags_seq_u64()
    146  flags &= ~mask;    in trace_print_flags_seq_u64()
    [all …]
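
trace_print_flags_seq() is the kernel's generic flags-to-names decoder: test each table mask as a full subset, clear the consumed bits, then print any leftover bits as hex. A freestanding userspace rendition of the same loop (hypothetical names; the real function writes into a trace_seq, not stdout):

    #include <stdio.h>

    struct my_flag_name { unsigned long mask; const char *name; };

    static void my_decode_flags(unsigned long flags,
                                const struct my_flag_name *tbl, char delim)
    {
        int first = 1;

        for (int i = 0; tbl[i].name && flags; i++) {
            unsigned long mask = tbl[i].mask;

            if ((flags & mask) != mask)  /* every bit of the mask must be set */
                continue;
            flags &= ~mask;              /* consume the decoded bits */
            if (!first)
                putchar(delim);
            fputs(tbl[i].name, stdout);
            first = 0;
        }
        if (flags) {                     /* undecoded residue, as hex */
            if (!first)
                putchar(delim);
            printf("0x%lx", flags);
        }
        putchar('\n');
    }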

/kernel/cgroup/

freezer.c
    28   if (!test_bit(CGRP_FROZEN, &cgrp->flags) &&    in cgroup_propagate_frozen()
    29   test_bit(CGRP_FREEZE, &cgrp->flags) &&    in cgroup_propagate_frozen()
    32   set_bit(CGRP_FROZEN, &cgrp->flags);    in cgroup_propagate_frozen()
    39   if (test_bit(CGRP_FROZEN, &cgrp->flags)) {    in cgroup_propagate_frozen()
    40   clear_bit(CGRP_FROZEN, &cgrp->flags);    in cgroup_propagate_frozen()
    64   frozen = test_bit(CGRP_FREEZE, &cgrp->flags) &&    in cgroup_update_frozen()
    69   if (test_bit(CGRP_FROZEN, &cgrp->flags))    in cgroup_update_frozen()
    72   set_bit(CGRP_FROZEN, &cgrp->flags);    in cgroup_update_frozen()
    75   if (!test_bit(CGRP_FROZEN, &cgrp->flags))    in cgroup_update_frozen()
    78   clear_bit(CGRP_FROZEN, &cgrp->flags);    in cgroup_update_frozen()
    [all …]
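
Unlike the `&`/`|` masks elsewhere in this listing, the cgroup freezer manipulates its flags with set_bit()/clear_bit()/test_bit(), which take bit numbers rather than masks and are atomic per bit. A sketch of that protocol with hypothetical names:

    #include <linux/bitops.h>

    enum { MY_FREEZE, MY_FROZEN };   /* bit numbers, not masks */

    struct my_group { unsigned long flags; };

    static void my_mark_frozen(struct my_group *grp, bool frozen)
    {
        if (frozen)
            set_bit(MY_FROZEN, &grp->flags);    /* atomic RMW on one bit */
        else
            clear_bit(MY_FROZEN, &grp->flags);
    }

    static bool my_is_frozen(struct my_group *grp)
    {
        return test_bit(MY_FROZEN, &grp->flags);
    }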

/kernel/rcu/

tree_exp.h
    78   unsigned long flags;    in sync_exp_reset_tree_hotplug() local
    95   raw_spin_lock_irqsave_rcu_node(rnp, flags);    in sync_exp_reset_tree_hotplug()
    97   raw_spin_unlock_irqrestore_rcu_node(rnp, flags);    in sync_exp_reset_tree_hotplug()
    104  raw_spin_unlock_irqrestore_rcu_node(rnp, flags);    in sync_exp_reset_tree_hotplug()
    115  raw_spin_lock_irqsave_rcu_node(rnp_up, flags);    in sync_exp_reset_tree_hotplug()
    119  raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);    in sync_exp_reset_tree_hotplug()
    134  unsigned long flags;    in sync_exp_reset_tree() local
    139  raw_spin_lock_irqsave_rcu_node(rnp, flags);    in sync_exp_reset_tree()
    142  raw_spin_unlock_irqrestore_rcu_node(rnp, flags);    in sync_exp_reset_tree()
    165  unsigned long flags;    in sync_rcu_exp_done_unlocked() local
    [all …]

tree_nocb.h
    177  unsigned long flags)    in rcu_nocb_unlock_irqrestore() argument
    181  raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);    in rcu_nocb_unlock_irqrestore()
    183  local_irq_restore(flags);    in rcu_nocb_unlock_irqrestore()
    217  bool force, unsigned long flags)    in __wake_nocb_gp() argument
    223  raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);    in __wake_nocb_gp()
    238  raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);    in __wake_nocb_gp()
    252  unsigned long flags;    in wake_nocb_gp() local
    255  raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);    in wake_nocb_gp()
    256  return __wake_nocb_gp(rdp_gp, rdp, force, flags);    in wake_nocb_gp()
    291  unsigned long flags;    in wake_nocb_gp_defer() local
    [all …]

tree.c
    82    .cblist.flags = SEGCBLIST_RCU_CORE,
    147   unsigned long gps, unsigned long flags);
    536   void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,    in rcutorture_get_gp_data() argument
    541   *flags = READ_ONCE(rcu_state.gp_flags);    in rcutorture_get_gp_data()
    576   if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))    in rcu_irq_work_resched()
    579   if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))    in rcu_irq_work_resched()
    1306  unsigned long flags;    in note_gp_changes() local
    1310  local_irq_save(flags);    in note_gp_changes()
    1315  local_irq_restore(flags);    in note_gp_changes()
    1319  raw_spin_unlock_irqrestore_rcu_node(rnp, flags);    in note_gp_changes()
    [all …]

/kernel/locking/

rwbase_rt.c
    187  unsigned long flags)    in __rwbase_write_unlock() argument
    196  raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);    in __rwbase_write_unlock()
    203  unsigned long flags;    in rwbase_write_unlock() local
    205  raw_spin_lock_irqsave(&rtm->wait_lock, flags);    in rwbase_write_unlock()
    206  __rwbase_write_unlock(rwb, WRITER_BIAS, flags);    in rwbase_write_unlock()
    212  unsigned long flags;    in rwbase_write_downgrade() local
    214  raw_spin_lock_irqsave(&rtm->wait_lock, flags);    in rwbase_write_downgrade()
    216  __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);    in rwbase_write_downgrade()
    240  unsigned long flags;    in rwbase_write_lock() local
    249  raw_spin_lock_irqsave(&rtm->wait_lock, flags);    in rwbase_write_lock()
    [all …]

semaphore.c
    56   unsigned long flags;    in down() local
    59   raw_spin_lock_irqsave(&sem->lock, flags);    in down()
    64   raw_spin_unlock_irqrestore(&sem->lock, flags);    in down()
    79   unsigned long flags;    in down_interruptible() local
    83   raw_spin_lock_irqsave(&sem->lock, flags);    in down_interruptible()
    88   raw_spin_unlock_irqrestore(&sem->lock, flags);    in down_interruptible()
    106  unsigned long flags;    in down_killable() local
    110  raw_spin_lock_irqsave(&sem->lock, flags);    in down_killable()
    115  raw_spin_unlock_irqrestore(&sem->lock, flags);    in down_killable()
    136  unsigned long flags;    in down_trylock() local
    [all …]
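
Every down_*() variant above wraps the same raw_spin_lock_irqsave() section around the semaphore count. From the caller's side the API looks like this minimal, hypothetical sketch:

    #include <linux/semaphore.h>

    static struct semaphore my_sem;

    static void my_init(void)
    {
        sema_init(&my_sem, 1);            /* count of 1 => binary semaphore */
    }

    static int my_critical_work(void)
    {
        if (down_interruptible(&my_sem))  /* sleeps; -EINTR on signal */
            return -EINTR;

        /* ... exclusive section ... */

        up(&my_sem);                      /* release, waking one waiter */
        return 0;
    }

For new code a mutex is usually the better fit when the count is 1; the counting semaphore earns its keep when more than one holder is allowed.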

/kernel/power/

wakeup_reason.c
    153  unsigned long flags;    in log_irq_wakeup_reason() local
    155  spin_lock_irqsave(&wakeup_reason_lock, flags);    in log_irq_wakeup_reason()
    157  spin_unlock_irqrestore(&wakeup_reason_lock, flags);    in log_irq_wakeup_reason()
    162  spin_unlock_irqrestore(&wakeup_reason_lock, flags);    in log_irq_wakeup_reason()
    170  spin_unlock_irqrestore(&wakeup_reason_lock, flags);    in log_irq_wakeup_reason()
    176  unsigned long flags;    in log_threaded_irq_wakeup_reason() local
    188  spin_lock_irqsave(&wakeup_reason_lock, flags);    in log_threaded_irq_wakeup_reason()
    191  spin_unlock_irqrestore(&wakeup_reason_lock, flags);    in log_threaded_irq_wakeup_reason()
    196  spin_unlock_irqrestore(&wakeup_reason_lock, flags);    in log_threaded_irq_wakeup_reason()
    212  spin_unlock_irqrestore(&wakeup_reason_lock, flags);    in log_threaded_irq_wakeup_reason()
    [all …]

/kernel/time/

clocksource.c
    131  static inline void clocksource_watchdog_lock(unsigned long *flags)    in clocksource_watchdog_lock() argument
    133  spin_lock_irqsave(&watchdog_lock, *flags);    in clocksource_watchdog_lock()
    136  static inline void clocksource_watchdog_unlock(unsigned long *flags)    in clocksource_watchdog_unlock() argument
    138  spin_unlock_irqrestore(&watchdog_lock, *flags);    in clocksource_watchdog_unlock()
    170  cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);    in __clocksource_unstable()
    171  cs->flags |= CLOCK_SOURCE_UNSTABLE;    in __clocksource_unstable()
    199  unsigned long flags;    in clocksource_mark_unstable() local
    201  spin_lock_irqsave(&watchdog_lock, flags);    in clocksource_mark_unstable()
    202  if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {    in clocksource_mark_unstable()
    207  spin_unlock_irqrestore(&watchdog_lock, flags);    in clocksource_mark_unstable()
    [all …]
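
Lines 131-138 show a wrinkle on the irqsave idiom: when the lock/unlock pair is hidden behind helpers, the flags storage must still live in the caller's frame, so the helpers take `unsigned long *flags` and the macro writes through the pointer. A sketch with hypothetical names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);

    static inline void my_lock_irqsave(unsigned long *flags)
    {
        spin_lock_irqsave(&my_lock, *flags);   /* macro stores through *flags */
    }

    static inline void my_unlock_irqrestore(unsigned long *flags)
    {
        spin_unlock_irqrestore(&my_lock, *flags);
    }

    /* Caller keeps the storage:
     *     unsigned long flags;
     *     my_lock_irqsave(&flags);
     *     ...
     *     my_unlock_irqrestore(&flags);
     */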

posix-timers.c
    105  static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
    107  #define lock_timer(tid, flags) \    argument
    109  __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
    164  static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)    in unlock_timer() argument
    166  spin_unlock_irqrestore(&timr->it_lock, flags);    in unlock_timer()
    316  unsigned long flags;    in posixtimer_rearm() local
    318  timr = lock_timer(info->si_tid, &flags);    in posixtimer_rearm()
    333  unlock_timer(timr, flags);    in posixtimer_rearm()
    369  unsigned long flags;    in posix_timer_fn() local
    374  spin_lock_irqsave(&timr->it_lock, flags);    in posix_timer_fn()
    [all …]

alarmtimer.c
    73   unsigned long flags;    in alarmtimer_get_rtcdev() local
    76   spin_lock_irqsave(&rtcdev_lock, flags);    in alarmtimer_get_rtcdev()
    78   spin_unlock_irqrestore(&rtcdev_lock, flags);    in alarmtimer_get_rtcdev()
    87   unsigned long flags;    in alarmtimer_rtc_add_device() local
    105  spin_lock_irqsave(&rtcdev_lock, flags);    in alarmtimer_rtc_add_device()
    120  spin_unlock_irqrestore(&rtcdev_lock, flags);    in alarmtimer_rtc_add_device()
    201  unsigned long flags;    in alarmtimer_fired() local
    205  spin_lock_irqsave(&base->lock, flags);    in alarmtimer_fired()
    207  spin_unlock_irqrestore(&base->lock, flags);    in alarmtimer_fired()
    212  spin_lock_irqsave(&base->lock, flags);    in alarmtimer_fired()
    [all …]

/kernel/bpf/

ringbuf.c
    97   const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |    in bpf_ringbuf_area_alloc() local
    130  page = alloc_pages_node(numa_node, flags, 0);    in bpf_ringbuf_area_alloc()
    245  u64 flags)    in ringbuf_map_update_elem() argument
    393  unsigned long cons_pos, prod_pos, new_prod_pos, flags;    in __bpf_ringbuf_reserve() local
    407  if (!spin_trylock_irqsave(&rb->spinlock, flags))    in __bpf_ringbuf_reserve()
    410  spin_lock_irqsave(&rb->spinlock, flags);    in __bpf_ringbuf_reserve()
    420  spin_unlock_irqrestore(&rb->spinlock, flags);    in __bpf_ringbuf_reserve()
    432  spin_unlock_irqrestore(&rb->spinlock, flags);    in __bpf_ringbuf_reserve()
    437  BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)    in BPF_CALL_3() argument
    441  if (unlikely(flags))    in BPF_CALL_3()
    [all …]
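
Lines 407-410 split locking by context: BPF programs can run in NMI context, where spinning on a lock the interrupted CPU might hold could deadlock, so the reserve path uses spin_trylock_irqsave() there and bails out, and only blocks otherwise. A sketch of the same split with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/hardirq.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(my_lock);

    static int my_reserve(void)
    {
        unsigned long flags;

        if (in_nmi()) {
            if (!spin_trylock_irqsave(&my_lock, flags))
                return -EBUSY;            /* never spin in NMI context */
        } else {
            spin_lock_irqsave(&my_lock, flags);
        }

        /* ... critical section ... */

        spin_unlock_irqrestore(&my_lock, flags);
        return 0;
    }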

stackmap.c
    214  struct perf_callchain_entry *trace, u64 flags)    in __bpf_get_stackid() argument
    218  u32 skip = flags & BPF_F_SKIP_FIELD_MASK;    in __bpf_get_stackid()
    220  bool user = flags & BPF_F_USER_STACK;    in __bpf_get_stackid()
    237  if (hash_matches && flags & BPF_F_FAST_STACK_CMP)    in __bpf_get_stackid()
    256  if (bucket && !(flags & BPF_F_REUSE_STACKID)) {    in __bpf_get_stackid()
    264  if (bucket && !(flags & BPF_F_REUSE_STACKID))    in __bpf_get_stackid()
    284  u64, flags)    in BPF_CALL_3() argument
    287  u32 skip = flags & BPF_F_SKIP_FIELD_MASK;    in BPF_CALL_3()
    288  bool user = flags & BPF_F_USER_STACK;    in BPF_CALL_3()
    292  if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |    in BPF_CALL_3()
    [all …]
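
The stackmap helpers pack two kinds of data into one u64 `flags` argument: an integer skip count in the low byte (BPF_F_SKIP_FIELD_MASK) and boolean option bits above it, with unknown bits rejected up front (line 292). A sketch of unpacking that convention; the helper is hypothetical, the BPF_F_* constants are the real UAPI ones:

    #include <linux/bpf.h>
    #include <linux/errno.h>

    static int my_parse_stack_flags(u64 flags, u32 *skip, bool *user)
    {
        /* reject any bit outside the known set, like line 292 does */
        if (flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                      BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))
            return -EINVAL;

        *skip = flags & BPF_F_SKIP_FIELD_MASK;  /* integer field, low bits */
        *user = flags & BPF_F_USER_STACK;       /* boolean option bit */
        return 0;
    }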

/kernel/futex/

syscalls.c
    90   unsigned int flags = 0;    in do_futex() local
    93   flags |= FLAGS_SHARED;    in do_futex()
    96   flags |= FLAGS_CLOCKRT;    in do_futex()
    102  trace_android_vh_do_futex(cmd, &flags, uaddr2);    in do_futex()
    108  return futex_wait(uaddr, flags, val, timeout, val3);    in do_futex()
    113  return futex_wake(uaddr, flags, val, val3);    in do_futex()
    115  return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);    in do_futex()
    117  return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);    in do_futex()
    119  return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);    in do_futex()
    121  flags |= FLAGS_CLOCKRT;    in do_futex()
    [all …]