/kernel/sched/ |
D | wait.c |
    21  unsigned long flags;  in add_wait_queue() local
    23  wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;  in add_wait_queue()
    24  spin_lock_irqsave(&wq_head->lock, flags);  in add_wait_queue()
    26  spin_unlock_irqrestore(&wq_head->lock, flags);  in add_wait_queue()
    32  unsigned long flags;  in add_wait_queue_exclusive() local
    34  wq_entry->flags |= WQ_FLAG_EXCLUSIVE;  in add_wait_queue_exclusive()
    35  spin_lock_irqsave(&wq_head->lock, flags);  in add_wait_queue_exclusive()
    37  spin_unlock_irqrestore(&wq_head->lock, flags);  in add_wait_queue_exclusive()
    43  unsigned long flags;  in add_wait_queue_priority() local
    45  wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;  in add_wait_queue_priority()
    [all …]
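The matches above show the add_wait_queue() family setting or clearing WQ_FLAG_EXCLUSIVE on an entry before linking it under the queue head's irq-safe lock. A minimal caller-side sketch of the same machinery, assuming a kernel-module context (my_wq, my_cond, and the function names are invented for illustration):

    #include <linux/wait.h>
    #include <linux/sched.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wq);  /* hypothetical queue head */
    static bool my_cond;                    /* condition waiters sleep on */

    static int waiter(void *unused)
    {
            /* Sleeps until my_cond becomes true. Exclusive entries
             * (WQ_FLAG_EXCLUSIVE above) are woken one at a time. */
            return wait_event_interruptible(my_wq, my_cond);
    }

    static void waker(void)
    {
            my_cond = true;
            wake_up(&my_wq);  /* all non-exclusive waiters + one exclusive */
    }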
|
D | isolation.c |
    17  bool housekeeping_enabled(enum hk_flags flags)  in housekeeping_enabled() argument
    19  return !!(housekeeping_flags & flags);  in housekeeping_enabled()
    23  int housekeeping_any_cpu(enum hk_flags flags)  in housekeeping_any_cpu() argument
    28  if (housekeeping_flags & flags) {  in housekeeping_any_cpu()
    40  const struct cpumask *housekeeping_cpumask(enum hk_flags flags)  in housekeeping_cpumask() argument
    43  if (housekeeping_flags & flags)  in housekeeping_cpumask()
    49  void housekeeping_affine(struct task_struct *t, enum hk_flags flags)  in housekeeping_affine() argument
    52  if (housekeeping_flags & flags)  in housekeeping_affine()
    57  bool housekeeping_test_cpu(int cpu, enum hk_flags flags)  in housekeeping_test_cpu() argument
    60  if (housekeeping_flags & flags)  in housekeeping_test_cpu()
    [all …]
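Every helper here masks the global housekeeping_flags word with the caller's enum hk_flags. A sketch of how a subsystem might steer background work away from isolated CPUs (illustrative usage, not taken from this file; HK_FLAG_MISC is one of the real hk_flags values in kernels of this vintage):

    #include <linux/cpumask.h>
    #include <linux/sched/isolation.h>

    static int choose_cpu_for_background_work(void)
    {
            /* CPUs still allowed to run misc housekeeping work; with no
             * isolcpus=/nohz_full= configured this is cpu_possible_mask. */
            const struct cpumask *hk = housekeeping_cpumask(HK_FLAG_MISC);

            return cpumask_first(hk);
    }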
|
/kernel/ |
D | nsproxy.c |
    67  static struct nsproxy *create_new_namespaces(unsigned long flags,  in create_new_namespaces() argument
    78  new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, user_ns, new_fs);  in create_new_namespaces()
    84  new_nsp->uts_ns = copy_utsname(flags, user_ns, tsk->nsproxy->uts_ns);  in create_new_namespaces()
    90  new_nsp->ipc_ns = copy_ipcs(flags, user_ns, tsk->nsproxy->ipc_ns);  in create_new_namespaces()
    97  copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);  in create_new_namespaces()
    103  new_nsp->cgroup_ns = copy_cgroup_ns(flags, user_ns,  in create_new_namespaces()
    110  new_nsp->net_ns = copy_net_ns(flags, user_ns, tsk->nsproxy->net_ns);  in create_new_namespaces()
    116  new_nsp->time_ns_for_children = copy_time_ns(flags, user_ns,  in create_new_namespaces()
    151  int copy_namespaces(unsigned long flags, struct task_struct *tsk)  in copy_namespaces() argument
    157  if (likely(!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |  in copy_namespaces()
    [all …]
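The flags threaded through create_new_namespaces() are the CLONE_NEW* bits of clone(2)/unshare(2); copy_namespaces() takes a fast path when none of them are set. A hedged userspace sketch of how those bits reach this code:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/utsname.h>

    int main(void)
    {
            /* Each CLONE_NEW* bit asks the kernel to create one namespace
             * type; needs CAP_SYS_ADMIN (or an owning user namespace). */
            if (unshare(CLONE_NEWUTS | CLONE_NEWIPC) != 0) {
                    perror("unshare");
                    return 1;
            }

            sethostname("sandbox", 7);  /* visible only in the new UTS ns */

            struct utsname u;
            uname(&u);
            printf("hostname now: %s\n", u.nodename);
            return 0;
    }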
|
D | freezer.c |
    43  if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))  in freezing_slow_path()
    52  if (pm_freezing && !(p->flags & PF_KTHREAD))  in freezing_slow_path()
    73  current->flags |= PF_FROZEN;  in __refrigerator()
    76  current->flags &= ~PF_FROZEN;  in __refrigerator()
    80  if (!(current->flags & PF_FROZEN))  in __refrigerator()
    101  unsigned long flags;  in fake_signal_wake_up() local
    103  if (lock_task_sighand(p, &flags)) {  in fake_signal_wake_up()
    105  unlock_task_sighand(p, &flags);  in fake_signal_wake_up()
    122  unsigned long flags;  in freeze_task() local
    136  spin_lock_irqsave(&freezer_lock, flags);  in freeze_task()
    [all …]
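Two different `flags` appear in this file: the PF_* bits in task_struct::flags (PF_NOFREEZE, PF_KTHREAD, PF_FROZEN) and the ordinary saved-irq-state word for freezer_lock. A kthread opts into being frozen roughly like this (a sketch; my_thread_fn is an invented name):

    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int my_thread_fn(void *unused)
    {
            set_freezable();  /* drop PF_NOFREEZE so freeze_task() applies */

            while (!kthread_should_stop()) {
                    try_to_freeze();  /* parks in __refrigerator() if freezing */
                    /* ... one unit of work ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }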
|
D | kthread.c |
    53  unsigned long flags;  member
    73  WARN_ON(!(k->flags & PF_KTHREAD));  in to_kthread()
    91  if (kthread && !(p->flags & PF_KTHREAD))  in __to_kthread()
    136  return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);  in kthread_should_stop()
    142  return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);  in __kthread_should_park()
    249  if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))  in __kthread_parkme()
    316  if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {  in kthread()
    462  unsigned long flags;  in __kthread_bind_mask() local
    470  raw_spin_lock_irqsave(&p->pi_lock, flags);  in __kthread_bind_mask()
    472  p->flags |= PF_NO_SETAFFINITY;  in __kthread_bind_mask()
    [all …]
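kthread_should_stop() and __kthread_should_park() simply test bits in struct kthread::flags that kthread_stop()/kthread_park() set from the outside. The canonical consumer loop (sketch; worker and "my-worker" are invented names):

    #include <linux/err.h>
    #include <linux/delay.h>
    #include <linux/kthread.h>

    static struct task_struct *worker_task;

    static int worker(void *data)
    {
            while (!kthread_should_stop()) {
                    /* ... process pending work ... */
                    msleep_interruptible(100);
            }
            return 0;  /* value is returned to kthread_stop() */
    }

    static int start(void)
    {
            worker_task = kthread_run(worker, NULL, "my-worker");
            return PTR_ERR_OR_ZERO(worker_task);
    }

    static void stop(void)
    {
            kthread_stop(worker_task);  /* sets KTHREAD_SHOULD_STOP, waits */
    }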
|
D | kexec.c |
    25  unsigned long flags)  in kimage_alloc_init() argument
    29  bool kexec_on_panic = flags & KEXEC_ON_CRASH;  in kimage_alloc_init()
    88  struct kexec_segment *segments, unsigned long flags)  in do_kexec_load() argument
    102  if (flags & KEXEC_ON_CRASH) {  in do_kexec_load()
    116  if (flags & KEXEC_ON_CRASH) {  in do_kexec_load()
    125  ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);  in do_kexec_load()
    129  if (flags & KEXEC_PRESERVE_CONTEXT)  in do_kexec_load()
    160  if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)  in do_kexec_load()
    191  unsigned long flags)  in kexec_load_check() argument
    216  if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))  in kexec_load_check()
    [all …]
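The check at line 216 is the usual "reject unknown flag bits" idiom, with the architecture field masked out before the comparison. Reduced to its plain form (the MY_* names are hypothetical):

    #include <linux/errno.h>

    #define MY_FLAG_A       (1UL << 0)
    #define MY_FLAG_B       (1UL << 1)
    #define MY_VALID_FLAGS  (MY_FLAG_A | MY_FLAG_B)

    static int check_flags(unsigned long flags)
    {
            if (flags & ~MY_VALID_FLAGS)  /* any bit we don't know about? */
                    return -EINVAL;       /* refuse: keeps bits reservable */
            return 0;
    }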
|
D | iomem.c |
    24  unsigned long flags)  in arch_memremap_can_ram_remap() argument
    31  unsigned long flags)  in try_ram_remap() argument
    37  arch_memremap_can_ram_remap(offset, size, flags))  in try_ram_remap()
    71  void *memremap(resource_size_t offset, size_t size, unsigned long flags)  in memremap() argument
    77  if (!flags)  in memremap()
    87  if (flags & MEMREMAP_WB) {  in memremap()
    95  addr = try_ram_remap(offset, size, flags);  in memremap()
    106  if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {  in memremap()
    112  if (!addr && (flags & MEMREMAP_WT))  in memremap()
    115  if (!addr && (flags & MEMREMAP_WC))  in memremap()
    [all …]
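In memremap() the flags select caching modes, tried in the order WB, then WT, then WC. Driver-side sketch (phys_base and region_size are placeholders):

    #include <linux/io.h>

    static void *map_device_table(resource_size_t phys_base, size_t region_size)
    {
            /* Prefer write-back; fall back to write-through when WB is
             * not possible for this physical range. */
            void *virt = memremap(phys_base, region_size,
                                  MEMREMAP_WB | MEMREMAP_WT);

            if (!virt)
                    return NULL;
            /* ... use virt; release later with memunmap(virt) ... */
            return virt;
    }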
|
D | resource.c |
    37  .flags = IORESOURCE_IO,
    45  .flags = IORESOURCE_MEM,
    153  static struct resource *alloc_resource(gfp_t flags)  in alloc_resource() argument
    155  return kzalloc(sizeof(struct resource), flags);  in alloc_resource()
    314  unsigned long flags, unsigned long desc,  in find_next_iomem_res() argument
    338  if ((p->flags & flags) != flags)  in find_next_iomem_res()
    352  .flags = p->flags,  in find_next_iomem_res()
    363  unsigned long flags, unsigned long desc,  in __walk_iomem_res_desc() argument
    371  !find_next_iomem_res(start, end, flags, desc, &res)) {  in __walk_iomem_res_desc()
    399  int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,  in walk_iomem_res_desc() argument
    [all …]
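`flags` means three things in this one file: the IORESOURCE_* type bits in struct resource, the gfp_t passed to kzalloc(), and the match mask that find_next_iomem_res() compares with `(p->flags & flags) != flags`. A sketch of the first of these, a driver describing and claiming an MMIO window (names and addresses invented):

    #include <linux/ioport.h>

    static struct resource my_mmio = {
            .name  = "my-device",
            .start = 0xfed40000,  /* example physical range */
            .end   = 0xfed40fff,
            .flags = IORESOURCE_MEM,
    };

    static int claim(void)
    {
            /* 0 on success, -EBUSY if it overlaps an existing region */
            return request_resource(&iomem_resource, &my_mmio);
    }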
|
/kernel/irq/ |
D | manage.c |
    44  unsigned long flags;  in __synchronize_hardirq() local
    54  raw_spin_lock_irqsave(&desc->lock, flags);  in __synchronize_hardirq()
    70  raw_spin_unlock_irqrestore(&desc->lock, flags);  in __synchronize_hardirq()
    408  unsigned long flags;  in irq_update_affinity_desc() local
    419  desc = irq_get_desc_buslock(irq, &flags, 0);  in irq_update_affinity_desc()
    455  irq_put_desc_busunlock(desc, flags);  in irq_update_affinity_desc()
    463  unsigned long flags;  in __irq_set_affinity() local
    469  raw_spin_lock_irqsave(&desc->lock, flags);  in __irq_set_affinity()
    471  raw_spin_unlock_irqrestore(&desc->lock, flags);  in __irq_set_affinity()
    507  unsigned long flags;  in irq_set_affinity_hint() local
    [all …]
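Here `flags` is only the saved irq state guarding the descriptor lock, but this same file implements request_irq(), whose IRQF_* action flags are what pm.c below inspects. Typical driver usage (sketch; my_handler and the device cookie are invented):

    #include <linux/interrupt.h>

    static irqreturn_t my_handler(int irq, void *dev_id)
    {
            /* ... check and acknowledge the device ... */
            return IRQ_HANDLED;
    }

    static int wire_up(int irq, void *my_dev)
    {
            /* IRQF_SHARED: the line may be shared, so dev_id must be
             * unique and the handler must verify its device fired. */
            return request_irq(irq, my_handler, IRQF_SHARED,
                               "my-dev", my_dev);
    }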
|
D | pm.c |
    37  if (action->flags & IRQF_FORCE_RESUME)  in irq_pm_install_action()
    43  if (action->flags & IRQF_NO_SUSPEND)  in irq_pm_install_action()
    45  else if (action->flags & IRQF_COND_SUSPEND)  in irq_pm_install_action()
    61  if (action->flags & IRQF_FORCE_RESUME)  in irq_pm_remove_action()
    64  if (action->flags & IRQF_NO_SUSPEND)  in irq_pm_remove_action()
    66  else if (action->flags & IRQF_COND_SUSPEND)  in irq_pm_remove_action()
    72  unsigned long chipflags = irq_desc_get_chip(desc)->flags;  in suspend_device_irq()
    137  unsigned long flags;  in suspend_device_irqs() local
    142  raw_spin_lock_irqsave(&desc->lock, flags);  in suspend_device_irqs()
    144  raw_spin_unlock_irqrestore(&desc->lock, flags);  in suspend_device_irqs()
    [all …]
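These IRQF_* bits are set once at request time and consulted when system suspend walks every descriptor. Requesting an interrupt that must keep firing across suspend looks roughly like this (sketch, invented names):

    #include <linux/interrupt.h>

    static int wire_up_critical_irq(int irq, irq_handler_t handler, void *dev)
    {
            /* IRQF_NO_SUSPEND: suspend_device_irqs() leaves this line
             * enabled (e.g. timekeeping or watchdog interrupts). It does
             * not make the device a wakeup source; that is what
             * enable_irq_wake() is for. */
            return request_irq(irq, handler, IRQF_NO_SUSPEND,
                               "suspend-critical", dev);
    }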
|
/kernel/trace/ |
D | trace_functions_graph.c |
    95  struct trace_seq *s, u32 flags);
    130  unsigned long flags;  in trace_graph_entry() local
    171  local_irq_save(flags);  in trace_graph_entry()
    176  trace_ctx = tracing_gen_ctx_flags(flags);  in trace_graph_entry()
    183  local_irq_restore(flags);  in trace_graph_entry()
    239  unsigned long flags;  in trace_graph_return() local
    251  local_irq_save(flags);  in trace_graph_return()
    256  trace_ctx = tracing_gen_ctx_flags(flags);  in trace_graph_return()
    260  local_irq_restore(flags);  in trace_graph_return()
    512  enum trace_type type, int cpu, pid_t pid, u32 flags)  in print_graph_irq() argument
    [all …]
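Unlike spin_lock_irqsave(), local_irq_save() disables interrupts on the local CPU without taking any lock; the tracer then encodes the saved state into the trace context via tracing_gen_ctx_flags(). The bare pattern:

    #include <linux/irqflags.h>

    static void touch_percpu_state(void)
    {
            unsigned long flags;

            local_irq_save(flags);    /* flags receives the old IRQ state */
            /* ... strictly per-CPU data: nothing else can run on this
             * CPU until we restore, so no lock is required ... */
            local_irq_restore(flags); /* re-enables only if it was enabled */
    }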
|
/kernel/cgroup/ |
D | freezer.c |
    27  if (!test_bit(CGRP_FROZEN, &cgrp->flags) &&  in cgroup_propagate_frozen()
    28  test_bit(CGRP_FREEZE, &cgrp->flags) &&  in cgroup_propagate_frozen()
    31  set_bit(CGRP_FROZEN, &cgrp->flags);  in cgroup_propagate_frozen()
    38  if (test_bit(CGRP_FROZEN, &cgrp->flags)) {  in cgroup_propagate_frozen()
    39  clear_bit(CGRP_FROZEN, &cgrp->flags);  in cgroup_propagate_frozen()
    63  frozen = test_bit(CGRP_FREEZE, &cgrp->flags) &&  in cgroup_update_frozen()
    68  if (test_bit(CGRP_FROZEN, &cgrp->flags))  in cgroup_update_frozen()
    71  set_bit(CGRP_FROZEN, &cgrp->flags);  in cgroup_update_frozen()
    74  if (!test_bit(CGRP_FROZEN, &cgrp->flags))  in cgroup_update_frozen()
    77  clear_bit(CGRP_FROZEN, &cgrp->flags);  in cgroup_update_frozen()
    [all …]
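CGRP_FREEZE ("freezing was requested") and CGRP_FROZEN ("every task actually froze") are bit numbers into cgroup::flags, driven through the atomic bitop API. The same pattern in isolation (the MY_* bit numbers are invented):

    #include <linux/bitops.h>
    #include <linux/types.h>

    enum { MY_REQUESTED, MY_ACTIVE };  /* bit numbers, not masks */

    static unsigned long state_flags;

    static void update_state(bool work_done)
    {
            /* set_bit/clear_bit/test_bit are atomic per-bit operations
             * on one word, safe against concurrent updates of other bits. */
            if (test_bit(MY_REQUESTED, &state_flags) && work_done)
                    set_bit(MY_ACTIVE, &state_flags);
            else
                    clear_bit(MY_ACTIVE, &state_flags);
    }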
|
/kernel/locking/ |
D | rwbase_rt.c |
    180  unsigned long flags)  in __rwbase_write_unlock() argument
    189  raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);  in __rwbase_write_unlock()
    196  unsigned long flags;  in rwbase_write_unlock() local
    198  raw_spin_lock_irqsave(&rtm->wait_lock, flags);  in rwbase_write_unlock()
    199  __rwbase_write_unlock(rwb, WRITER_BIAS, flags);  in rwbase_write_unlock()
    205  unsigned long flags;  in rwbase_write_downgrade() local
    207  raw_spin_lock_irqsave(&rtm->wait_lock, flags);  in rwbase_write_downgrade()
    209  __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);  in rwbase_write_downgrade()
    233  unsigned long flags;  in rwbase_write_lock() local
    242  raw_spin_lock_irqsave(&rtm->wait_lock, flags);  in rwbase_write_lock()
    [all …]
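One detail worth noting: the saved irq state is passed by value into __rwbase_write_unlock(), which drops wait_lock on the caller's behalf. The generic shape of that hand-off (sketch):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(my_lock);

    /* Consumes the lock the caller acquired, flags included. */
    static void finish_and_unlock(unsigned long flags)
    {
            /* ... publish results while still holding my_lock ... */
            raw_spin_unlock_irqrestore(&my_lock, flags);
    }

    static void do_op(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&my_lock, flags);
            finish_and_unlock(flags);  /* do not unlock again here */
    }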
|
D | semaphore.c |
    55  unsigned long flags;  in down() local
    58  raw_spin_lock_irqsave(&sem->lock, flags);  in down()
    63  raw_spin_unlock_irqrestore(&sem->lock, flags);  in down()
    78  unsigned long flags;  in down_interruptible() local
    82  raw_spin_lock_irqsave(&sem->lock, flags);  in down_interruptible()
    87  raw_spin_unlock_irqrestore(&sem->lock, flags);  in down_interruptible()
    105  unsigned long flags;  in down_killable() local
    109  raw_spin_lock_irqsave(&sem->lock, flags);  in down_killable()
    114  raw_spin_unlock_irqrestore(&sem->lock, flags);  in down_killable()
    135  unsigned long flags;  in down_trylock() local
    [all …]
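Every down*() variant guards the count with the same irqsave pattern; they differ only in whether, and how, the sleep can be aborted. Caller-side sketch (my_sem is invented; sema_init() keeps the example version-independent):

    #include <linux/semaphore.h>

    static struct semaphore my_sem;

    static void my_sem_setup(void)
    {
            sema_init(&my_sem, 1);  /* binary semaphore */
    }

    static int do_guarded_work(void)
    {
            /* Returns -EINTR if a signal arrives while sleeping;
             * plain down() would sleep uninterruptibly instead. */
            if (down_interruptible(&my_sem))
                    return -EINTR;

            /* ... exclusive section ... */

            up(&my_sem);
            return 0;
    }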
|
/kernel/power/ |
D | wakeup_reason.c |
    153  unsigned long flags;  in log_irq_wakeup_reason() local
    155  spin_lock_irqsave(&wakeup_reason_lock, flags);  in log_irq_wakeup_reason()
    157  spin_unlock_irqrestore(&wakeup_reason_lock, flags);  in log_irq_wakeup_reason()
    162  spin_unlock_irqrestore(&wakeup_reason_lock, flags);  in log_irq_wakeup_reason()
    170  spin_unlock_irqrestore(&wakeup_reason_lock, flags);  in log_irq_wakeup_reason()
    176  unsigned long flags;  in log_threaded_irq_wakeup_reason() local
    188  spin_lock_irqsave(&wakeup_reason_lock, flags);  in log_threaded_irq_wakeup_reason()
    191  spin_unlock_irqrestore(&wakeup_reason_lock, flags);  in log_threaded_irq_wakeup_reason()
    196  spin_unlock_irqrestore(&wakeup_reason_lock, flags);  in log_threaded_irq_wakeup_reason()
    212  spin_unlock_irqrestore(&wakeup_reason_lock, flags);  in log_threaded_irq_wakeup_reason()
    [all …]
|
/kernel/rcu/ |
D | tree_nocb.h |
    174  unsigned long flags)  in rcu_nocb_unlock_irqrestore() argument
    178  raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);  in rcu_nocb_unlock_irqrestore()
    180  local_irq_restore(flags);  in rcu_nocb_unlock_irqrestore()
    222  bool force, unsigned long flags)  in __wake_nocb_gp() argument
    228  raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);  in __wake_nocb_gp()
    243  raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);  in __wake_nocb_gp()
    257  unsigned long flags;  in wake_nocb_gp() local
    260  raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);  in wake_nocb_gp()
    261  return __wake_nocb_gp(rdp_gp, rdp, force, flags);  in wake_nocb_gp()
    296  unsigned long flags;  in wake_nocb_gp_defer() local
    [all …]
|
D | tree_exp.h |
    76  unsigned long flags;  in sync_exp_reset_tree_hotplug() local
    93  raw_spin_lock_irqsave_rcu_node(rnp, flags);  in sync_exp_reset_tree_hotplug()
    95  raw_spin_unlock_irqrestore_rcu_node(rnp, flags);  in sync_exp_reset_tree_hotplug()
    102  raw_spin_unlock_irqrestore_rcu_node(rnp, flags);  in sync_exp_reset_tree_hotplug()
    113  raw_spin_lock_irqsave_rcu_node(rnp_up, flags);  in sync_exp_reset_tree_hotplug()
    117  raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);  in sync_exp_reset_tree_hotplug()
    132  unsigned long flags;  in sync_exp_reset_tree() local
    137  raw_spin_lock_irqsave_rcu_node(rnp, flags);  in sync_exp_reset_tree()
    140  raw_spin_unlock_irqrestore_rcu_node(rnp, flags);  in sync_exp_reset_tree()
    163  unsigned long flags;  in sync_rcu_exp_done_unlocked() local
    [all …]
|
D | rcu_segcblist.h |
    57  int flags)  in rcu_segcblist_set_flags() argument
    59  WRITE_ONCE(rsclp->flags, rsclp->flags | flags);  in rcu_segcblist_set_flags()
    63  int flags)  in rcu_segcblist_clear_flags() argument
    65  WRITE_ONCE(rsclp->flags, rsclp->flags & ~flags);  in rcu_segcblist_clear_flags()
    69  int flags)  in rcu_segcblist_test_flags() argument
    71  return READ_ONCE(rsclp->flags) & flags;  in rcu_segcblist_test_flags()
    95  int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED;  in rcu_segcblist_completely_offloaded() local
    97  if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (rsclp->flags & flags) == flags)  in rcu_segcblist_completely_offloaded()
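Unlike the set_bit()/clear_bit() family, these helpers are not atomic read-modify-write: READ_ONCE()/WRITE_ONCE() only stop the compiler from tearing, fusing, or repeating the accesses, so writers must already be serialized (here by the nocb lock) while readers may be lockless. Standalone shape of the idiom:

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct widget {
            int flags;  /* written only under the owner's lock */
    };

    static void widget_set_flags(struct widget *w, int flags)
    {
            /* Marked plain-int access: safe against lockless readers,
             * but NOT safe against concurrent writers. */
            WRITE_ONCE(w->flags, w->flags | flags);
    }

    static bool widget_test_flags(struct widget *w, int flags)
    {
            return READ_ONCE(w->flags) & flags;  /* lockless read */
    }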
|
/kernel/time/ |
D | clocksource.c |
    125  static inline void clocksource_watchdog_lock(unsigned long *flags)  in clocksource_watchdog_lock() argument
    127  spin_lock_irqsave(&watchdog_lock, *flags);  in clocksource_watchdog_lock()
    130  static inline void clocksource_watchdog_unlock(unsigned long *flags)  in clocksource_watchdog_unlock() argument
    132  spin_unlock_irqrestore(&watchdog_lock, *flags);  in clocksource_watchdog_unlock()
    164  cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);  in __clocksource_unstable()
    165  cs->flags |= CLOCK_SOURCE_UNSTABLE;  in __clocksource_unstable()
    193  unsigned long flags;  in clocksource_mark_unstable() local
    195  spin_lock_irqsave(&watchdog_lock, flags);  in clocksource_mark_unstable()
    196  if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {  in clocksource_mark_unstable()
    201  spin_unlock_irqrestore(&watchdog_lock, flags);  in clocksource_mark_unstable()
    [all …]
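clocksource_watchdog_lock() shows why such wrappers must take `unsigned long *flags`: spin_lock_irqsave() is a macro that assigns to its flags argument, so a by-value parameter would store the saved IRQ state into a copy the caller never sees. Minimal reproduction of the idiom:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);

    /* Pointer is mandatory: the macro writes into *flags and the
     * caller needs that value back for the matching unlock. */
    static inline void my_lock_irqsave(unsigned long *flags)
    {
            spin_lock_irqsave(&my_lock, *flags);
    }

    static inline void my_unlock_irqrestore(unsigned long *flags)
    {
            spin_unlock_irqrestore(&my_lock, *flags);
    }

    static void caller(void)
    {
            unsigned long flags;

            my_lock_irqsave(&flags);
            /* ... critical section ... */
            my_unlock_irqrestore(&flags);
    }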
|
D | posix-timers.c |
    105  static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
    107  #define lock_timer(tid, flags) \  argument
    109  __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
    164  static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)  in unlock_timer() argument
    166  spin_unlock_irqrestore(&timr->it_lock, flags);  in unlock_timer()
    316  unsigned long flags;  in posixtimer_rearm() local
    318  timr = lock_timer(info->si_tid, &flags);  in posixtimer_rearm()
    333  unlock_timer(timr, flags);  in posixtimer_rearm()
    369  unsigned long flags;  in posix_timer_fn() local
    374  spin_lock_irqsave(&timr->it_lock, flags);  in posix_timer_fn()
    [all …]
|
D | alarmtimer.c |
    73  unsigned long flags;  in alarmtimer_get_rtcdev() local
    76  spin_lock_irqsave(&rtcdev_lock, flags);  in alarmtimer_get_rtcdev()
    78  spin_unlock_irqrestore(&rtcdev_lock, flags);  in alarmtimer_get_rtcdev()
    87  unsigned long flags;  in alarmtimer_rtc_add_device() local
    105  spin_lock_irqsave(&rtcdev_lock, flags);  in alarmtimer_rtc_add_device()
    120  spin_unlock_irqrestore(&rtcdev_lock, flags);  in alarmtimer_rtc_add_device()
    201  unsigned long flags;  in alarmtimer_fired() local
    205  spin_lock_irqsave(&base->lock, flags);  in alarmtimer_fired()
    207  spin_unlock_irqrestore(&base->lock, flags);  in alarmtimer_fired()
    212  spin_lock_irqsave(&base->lock, flags);  in alarmtimer_fired()
    [all …]
|
/kernel/bpf/ |
D | stackmap.c |
    258  struct perf_callchain_entry *trace, u64 flags)  in __bpf_get_stackid() argument
    262  u32 skip = flags & BPF_F_SKIP_FIELD_MASK;  in __bpf_get_stackid()
    264  bool user = flags & BPF_F_USER_STACK;  in __bpf_get_stackid()
    281  if (hash_matches && flags & BPF_F_FAST_STACK_CMP)  in __bpf_get_stackid()
    300  if (bucket && !(flags & BPF_F_REUSE_STACKID)) {  in __bpf_get_stackid()
    308  if (bucket && !(flags & BPF_F_REUSE_STACKID))  in __bpf_get_stackid()
    328  u64, flags)  in BPF_CALL_3() argument
    331  u32 skip = flags & BPF_F_SKIP_FIELD_MASK;  in BPF_CALL_3()
    332  bool user = flags & BPF_F_USER_STACK;  in BPF_CALL_3()
    336  if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |  in BPF_CALL_3()
    [all …]
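On the BPF program side, the same u64 packs a frame-skip count in the low bits (BPF_F_SKIP_FIELD_MASK) together with behavior flags. A sketch of a program feeding a BPF_MAP_TYPE_STACK_TRACE map, assuming libbpf and a perf-event attach point (map sizing is illustrative):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <linux/bpf_perf_event.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_STACK_TRACE);
            __uint(max_entries, 1024);
            __uint(key_size, sizeof(__u32));
            __uint(value_size, 127 * sizeof(__u64)); /* PERF_MAX_STACK_DEPTH - 1 */
    } stacks SEC(".maps");

    SEC("perf_event")
    int on_sample(struct bpf_perf_event_data *ctx)
    {
            /* Low bits: skip 2 frames. BPF_F_USER_STACK walks the user
             * stack; BPF_F_FAST_STACK_CMP dedups buckets by hash only. */
            long id = bpf_get_stackid(ctx, &stacks,
                                      2 | BPF_F_USER_STACK |
                                      BPF_F_FAST_STACK_CMP);
            if (id < 0)
                    return 0;
            /* ... count hits per stack id in a companion map ... */
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";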
|
/kernel/kcsan/ |
D | report.c |
    438  static void release_report(unsigned long *flags, struct other_info *other_info)  in release_report() argument
    445  raw_spin_unlock_irqrestore(&report_lock, *flags);  in release_report()
    454  static void set_other_info_task_blocking(unsigned long *flags,  in set_other_info_task_blocking() argument
    483  raw_spin_unlock_irqrestore(&report_lock, *flags);  in set_other_info_task_blocking()
    490  raw_spin_lock_irqsave(&report_lock, *flags);  in set_other_info_task_blocking()
    512  static void prepare_report_producer(unsigned long *flags,  in prepare_report_producer() argument
    516  raw_spin_lock_irqsave(&report_lock, *flags);  in prepare_report_producer()
    537  set_other_info_task_blocking(flags, ai, other_info);  in prepare_report_producer()
    539  raw_spin_unlock_irqrestore(&report_lock, *flags);  in prepare_report_producer()
    543  static bool prepare_report_consumer(unsigned long *flags,  in prepare_report_consumer() argument
    [all …]
|
/kernel/printk/ |
D | internal.h |
    25  #define printk_safe_enter_irqsave(flags) \  argument
    27  local_irq_save(flags); \
    31  #define printk_safe_exit_irqrestore(flags) \  argument
    34  local_irq_restore(flags); \
    40  enum printk_info_flags *flags);
    48  #define printk_safe_enter_irqsave(flags) local_irq_save(flags)  argument
    49  #define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)  argument
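Because printk_safe_enter_irqsave() is a macro, `flags` is substituted textually as an lvalue, so no pointer indirection is needed (contrast clocksource_watchdog_lock() above), and the !CONFIG variant at lines 48-49 keeps call sites byte-for-byte identical. The same dual-definition pattern, reduced (CONFIG_MY_FEATURE is hypothetical):

    #include <linux/irqflags.h>

    #ifdef CONFIG_MY_FEATURE
    #define my_enter_irqsave(flags)                 \
            do {                                    \
                    local_irq_save(flags);          \
                    /* extra bookkeeping here */    \
            } while (0)
    #else
    /* Identical call-site syntax; just the irq save remains. */
    #define my_enter_irqsave(flags) local_irq_save(flags)
    #endif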
|
/kernel/dma/ |
D | debug.c |
    179  unsigned long flags;  in driver_filter() local
    203  read_lock_irqsave(&driver_name_lock, flags);  in driver_filter()
    212  read_unlock_irqrestore(&driver_name_lock, flags);  in driver_filter()
    249  unsigned long *flags)  in get_hash_bucket() argument
    256  *flags = __flags;  in get_hash_bucket()
    264  unsigned long flags)  in put_hash_bucket() argument
    267  spin_unlock_irqrestore(&bucket->lock, flags);  in put_hash_bucket()
    350  unsigned long *flags)  in bucket_find_contain() argument
    366  put_hash_bucket(*bucket, *flags);  in bucket_find_contain()
    369  *bucket = get_hash_bucket(&index, flags);  in bucket_find_contain()
    [all …]
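driver_filter() uses the reader/writer flavor: many concurrent readers of the current filter string, exclusive writers, with the usual saved-irq-state word. The bare shape:

    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static DEFINE_RWLOCK(name_lock);
    static char current_name[64];

    static bool name_matches(const char *candidate)
    {
            unsigned long flags;
            bool match;

            /* Readers run concurrently with each other but exclude
             * writers; irqsave because writers may run in irq context. */
            read_lock_irqsave(&name_lock, flags);
            match = strcmp(current_name, candidate) == 0;
            read_unlock_irqrestore(&name_lock, flags);

            return match;
    }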
|