/kernel/
D | kexec_core.c |
    1057  char *cur = cmdline, *tmp;  in parse_crashkernel_mem() local
    1064  start = memparse(cur, &tmp);  in parse_crashkernel_mem()
    1065  if (cur == tmp) {  in parse_crashkernel_mem()
    1069  cur = tmp;  in parse_crashkernel_mem()
    1070  if (*cur != '-') {  in parse_crashkernel_mem()
    1074  cur++;  in parse_crashkernel_mem()
    1077  if (*cur != ':') {  in parse_crashkernel_mem()
    1078  end = memparse(cur, &tmp);  in parse_crashkernel_mem()
    1079  if (cur == tmp) {  in parse_crashkernel_mem()
    1083  cur = tmp;  in parse_crashkernel_mem()
    [all …]
|
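The parse_crashkernel_mem() hits show the memparse() cursor idiom: cur walks the command line, tmp receives the end of each parsed token, and cur == tmp after a call means nothing was consumed, i.e. a syntax error. A minimal userspace sketch of the same idiom, with strtoull() standing in for the kernel's memparse() and all names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse "start-end"; return -1 on any malformed input. */
    static int parse_range(const char *cmdline,
                           unsigned long long *start, unsigned long long *end)
    {
            const char *cur = cmdline;
            char *tmp;

            *start = strtoull(cur, &tmp, 0);
            if (cur == tmp)                 /* no digits consumed: error */
                    return -1;
            cur = tmp;

            if (*cur != '-')                /* ranges look like "start-end" */
                    return -1;
            cur++;

            *end = strtoull(cur, &tmp, 0);
            if (cur == tmp)
                    return -1;
            return 0;
    }

    int main(void)
    {
            unsigned long long start, end;

            if (parse_range("0x1000-0x2000", &start, &end) == 0)
                    printf("range: %#llx-%#llx\n", start, end);
            return 0;
    }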
D | smpboot.c |
    208  struct smp_hotplug_thread *cur;  in smpboot_create_threads() local
    212  list_for_each_entry(cur, &hotplug_threads, list) {  in smpboot_create_threads()
    213  ret = __smpboot_create_thread(cur, cpu);  in smpboot_create_threads()
    231  struct smp_hotplug_thread *cur;  in smpboot_unpark_threads() local
    234  list_for_each_entry(cur, &hotplug_threads, list)  in smpboot_unpark_threads()
    235  if (cpumask_test_cpu(cpu, cur->cpumask))  in smpboot_unpark_threads()
    236  smpboot_unpark_thread(cur, cpu);  in smpboot_unpark_threads()
    250  struct smp_hotplug_thread *cur;  in smpboot_park_threads() local
    253  list_for_each_entry_reverse(cur, &hotplug_threads, list)  in smpboot_park_threads()
    254  smpboot_park_thread(cur, cpu);  in smpboot_park_threads()
|
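smpboot.c walks the global hotplug_threads list forwards when creating and unparking per-CPU threads, and backwards (list_for_each_entry_reverse) when parking them, so teardown undoes bringup in reverse registration order. A toy sketch of that ordering convention, with an array standing in for the kernel's linked list and hypothetical thread names:

    #include <stdio.h>

    struct smp_hotplug_thread { const char *comm; };

    static void bringup(struct smp_hotplug_thread *t, int n, int cpu)
    {
            for (int i = 0; i < n; i++)         /* forward: registration order */
                    printf("unpark %s/%d\n", t[i].comm, cpu);
    }

    static void teardown(struct smp_hotplug_thread *t, int n, int cpu)
    {
            for (int i = n - 1; i >= 0; i--)    /* reverse: unwind dependencies */
                    printf("park %s/%d\n", t[i].comm, cpu);
    }

    int main(void)
    {
            struct smp_hotplug_thread threads[] = {
                    { "ksoftirqd" }, { "migration" }, { "watchdog" },
            };

            bringup(threads, 3, 0);
            teardown(threads, 3, 0);            /* parks watchdog first */
            return 0;
    }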
D | cpuset.c |
    467  static int validate_change(struct cpuset *cur, struct cpuset *trial)  in validate_change() argument
    477  cpuset_for_each_child(c, css, cur)  in validate_change()
    483  if (cur == &top_cpuset)  in validate_change()
    486  par = parent_cs(cur);  in validate_change()
    501  c != cur &&  in validate_change()
    505  c != cur &&  in validate_change()
    515  if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {  in validate_change()
    516  if (!cpumask_empty(cur->cpus_allowed) &&  in validate_change()
    519  if (!nodes_empty(cur->mems_allowed) &&  in validate_change()
    529  if (is_cpu_exclusive(cur) &&  in validate_change()
    [all …]
|
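In validate_change(), cur is the cpuset being modified and trial is its proposed new state; the loop over siblings skips cur itself (the c != cur hits) and rejects the change if an exclusive cpuset would overlap another sibling. A reduced sketch of just that sibling check, with a one-word bitmask standing in for a cpumask and all names illustrative:

    #include <stdbool.h>

    struct cpuset {
            unsigned long cpus_allowed;     /* toy one-word "cpumask" */
            bool cpu_exclusive;
    };

    static bool change_is_valid(const struct cpuset *cur,
                                const struct cpuset *trial,
                                const struct cpuset *siblings, int nr)
    {
            for (int i = 0; i < nr; i++) {
                    const struct cpuset *c = &siblings[i];

                    if (c == cur)           /* skip the cpuset being changed */
                            continue;
                    if ((trial->cpu_exclusive || c->cpu_exclusive) &&
                        (trial->cpus_allowed & c->cpus_allowed))
                            return false;   /* exclusive sets must not overlap */
            }
            return true;
    }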
D | kprobes.c |
    1050  struct kprobe *cur = __this_cpu_read(kprobe_instance);  in aggr_fault_handler() local
    1056  if (cur && cur->fault_handler) {  in aggr_fault_handler()
    1057  if (cur->fault_handler(cur, regs, trapnr))  in aggr_fault_handler()
    1066  struct kprobe *cur = __this_cpu_read(kprobe_instance);  in aggr_break_handler() local
    1069  if (cur && cur->break_handler) {  in aggr_break_handler()
    1070  if (cur->break_handler(cur, regs))  in aggr_break_handler()
|
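Both aggregate handlers have the same shape: fetch the "kprobe currently firing" pointer and delegate to its optional callback, reporting whether the callback claimed the event. A userspace sketch; a plain global stands in for the per-CPU variable read by __this_cpu_read(), and the struct is a stand-in, not the kernel's real type:

    struct kprobe {
            int (*fault_handler)(struct kprobe *p, int trapnr);
    };

    static struct kprobe *kprobe_instance;      /* per-CPU in the kernel */

    static int aggr_fault(int trapnr)
    {
            struct kprobe *cur = kprobe_instance;

            if (cur && cur->fault_handler)
                    if (cur->fault_handler(cur, trapnr))
                            return 1;           /* callback handled the fault */
            return 0;                           /* fall through to default */
    }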
/kernel/power/
D | snapshot.c |
    311  struct bm_position cur; /* most recently used bit position */  member
    478  bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,  in memory_bm_position_reset()
    480  bm->cur.node = list_entry(bm->cur.zone->leaves.next,  in memory_bm_position_reset()
    482  bm->cur.node_pfn = 0;  in memory_bm_position_reset()
    483  bm->cur.node_bit = 0;  in memory_bm_position_reset()
    522  struct mem_extent *ext, *cur, *aux;  in create_mem_extents() local
    553  cur = ext;  in create_mem_extents()
    554  list_for_each_entry_safe_continue(cur, aux, list, hook) {  in create_mem_extents()
    555  if (zone_end < cur->start)  in create_mem_extents()
    557  if (zone_end < cur->end)  in create_mem_extents()
    [all …]
|
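snapshot.c keeps a cur member in the memory bitmap: a cached (zone, node, bit) position, so that walking page frames in ascending order resumes where the last lookup ended instead of re-descending the radix tree each time. A reduced sketch of the reset step; the field names mirror the kernel's, but the types and layout are deliberately simplified:

    #include <stddef.h>

    struct zone_node { struct zone_node *next; struct leaf_node *leaves; };
    struct leaf_node { struct leaf_node *next; unsigned long bits[8]; };

    struct bm_position {
            struct zone_node *zone;     /* zone most recently looked up */
            struct leaf_node *node;     /* leaf block inside that zone */
            unsigned long node_pfn;     /* first pfn covered by the leaf */
            int node_bit;               /* next bit to scan inside the leaf */
    };

    struct memory_bitmap {
            struct zone_node *zones;    /* head of the zone list */
            struct bm_position cur;
    };

    /* Rewind the cursor to the first bit of the first zone, as
     * memory_bm_position_reset() does before each full walk. */
    static void position_reset(struct memory_bitmap *bm)
    {
            bm->cur.zone = bm->zones;
            bm->cur.node = bm->zones ? bm->zones->leaves : NULL;
            bm->cur.node_pfn = 0;
            bm->cur.node_bit = 0;
    }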
D | swap.c |
    96   struct swap_map_page *cur;  member
    397  if (handle->cur)  in release_swap_writer()
    398  free_page((unsigned long)handle->cur);  in release_swap_writer()
    399  handle->cur = NULL;  in release_swap_writer()
    413  handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);  in get_swap_writer()
    414  if (!handle->cur) {  in get_swap_writer()
    440  if (!handle->cur)  in swap_write_page()
    446  handle->cur->entries[handle->k++] = offset;  in swap_write_page()
    451  handle->cur->next_swap = offset;  in swap_write_page()
    452  error = write_page(handle->cur, handle->cur_swap, hb);  in swap_write_page()
    [all …]
|
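Here cur is a page-sized table of swap offsets: swap_write_page() appends each data page's offset at slot k, and once the table fills up it is chained to its successor through next_swap and written out itself. A sketch of that fill-and-flush pattern; the capacity is a toy value, and flush_page() is a hypothetical stand-in for the kernel's write_page():

    #include <string.h>

    #define MAP_ENTRIES 511                 /* toy capacity of one map page */

    struct swap_map_page {
            unsigned long entries[MAP_ENTRIES];
            unsigned long next_swap;        /* offset of the next map page */
    };

    struct swap_map_handle {
            struct swap_map_page *cur;
            unsigned long cur_swap;         /* where cur will be written */
            unsigned int k;                 /* next free slot in cur */
    };

    extern int flush_page(void *buf, unsigned long offset);  /* assumed */

    static int map_add_entry(struct swap_map_handle *h,
                             unsigned long offset, unsigned long next_slot)
    {
            if (!h->cur)
                    return -1;
            h->cur->entries[h->k++] = offset;
            if (h->k == MAP_ENTRIES) {      /* page full: chain and flush */
                    h->cur->next_swap = next_slot;
                    if (flush_page(h->cur, h->cur_swap))
                            return -1;
                    memset(h->cur, 0, sizeof(*h->cur));
                    h->cur_swap = next_slot;
                    h->k = 0;
            }
            return 0;
    }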
D | power.h |
    113  unsigned int cur; /* number of the block of PAGE_SIZE bytes the  member
|
/kernel/locking/
D | mutex.c |
    161  struct mutex_waiter *cur;  in ww_mutex_set_context_fastpath() local
    187  list_for_each_entry(cur, &lock->base.wait_list, list) {  in ww_mutex_set_context_fastpath()
    188  debug_mutex_wake_waiter(&lock->base, cur);  in ww_mutex_set_context_fastpath()
    189  wake_up_process(cur->task);  in ww_mutex_set_context_fastpath()
    204  struct mutex_waiter *cur;  in ww_mutex_set_context_slowpath() local
    213  list_for_each_entry(cur, &lock->base.wait_list, list) {  in ww_mutex_set_context_slowpath()
    214  debug_mutex_wake_waiter(&lock->base, cur);  in ww_mutex_set_context_slowpath()
    215  wake_up_process(cur->task);  in ww_mutex_set_context_slowpath()
|
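Both ww_mutex paths end with the same sweep: after the wound/wait context is attached to the lock, every waiter on the wait list is woken so it can re-check whether it must now back off. A reduced sketch; the singly-linked list and wake_up_task() helper are stand-ins, not the kernel's types:

    struct task;                                /* opaque here */
    struct mutex_waiter { struct task *task; struct mutex_waiter *next; };

    extern void wake_up_task(struct task *t);   /* assumed helper */

    /* Wake everyone queued on the lock so each waiter can re-evaluate
     * itself against the newly set wound/wait context. */
    static void wake_all_waiters(struct mutex_waiter *wait_list)
    {
            struct mutex_waiter *cur;

            for (cur = wait_list; cur; cur = cur->next)
                    wake_up_task(cur->task);
    }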
/kernel/time/
D | tick-broadcast.c |
    91   struct clock_event_device *cur = tick_broadcast_device.evtdev;  in tick_install_broadcast_device() local
    93   if (!tick_check_broadcast_device(cur, dev))  in tick_install_broadcast_device()
    99   clockevents_exchange_device(cur, dev);  in tick_install_broadcast_device()
    100  if (cur)  in tick_install_broadcast_device()
    101  cur->event_handler = clockevents_handle_noop;  in tick_install_broadcast_device()
|
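tick_install_broadcast_device() swaps in a newly registered clock event device when it beats the current one, and points the displaced device at a no-op handler so a straggling interrupt from it is harmless. A simplified sketch with a single global and a toy rating comparison; the real suitability check in tick_check_broadcast_device() looks at more than the rating:

    struct clock_event_device {
            int rating;
            void (*event_handler)(struct clock_event_device *);
    };

    static struct clock_event_device *broadcast_evtdev;

    static void handle_noop(struct clock_event_device *dev) { (void)dev; }

    static void install_broadcast_device(struct clock_event_device *dev)
    {
            struct clock_event_device *cur = broadcast_evtdev;

            if (cur && cur->rating >= dev->rating)
                    return;                 /* current device is good enough */

            broadcast_evtdev = dev;         /* exchange devices */
            if (cur)
                    cur->event_handler = handle_noop;  /* park the old one */
    }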
/kernel/sched/
D | fair.c |
    1410  struct task_struct *cur;  in task_numa_compare() local
    1421  cur = dst_rq->curr;  in task_numa_compare()
    1425  if ((cur->flags & PF_EXITING) || is_idle_task(cur))  in task_numa_compare()
    1426  cur = NULL;  in task_numa_compare()
    1437  get_task_struct(cur);  in task_numa_compare()
    1446  if (cur == env->p)  in task_numa_compare()
    1456  if (cur) {  in task_numa_compare()
    1458  if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))  in task_numa_compare()
    1465  if (cur->numa_group == env->p->numa_group) {  in task_numa_compare()
    1466  imp = taskimp + task_weight(cur, env->src_nid, dist) -  in task_numa_compare()
    [all …]
|
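In task_numa_compare(), cur is whatever is running on the candidate destination CPU: an exiting or idle task is normalized to NULL (the migration then needs no swap partner), and a real candidate is pinned with get_task_struct() before being examined further. A reduced sketch of just that normalization step; all types and helpers are stand-ins:

    #include <stdbool.h>
    #include <stddef.h>

    struct task { bool exiting; bool idle; int refcnt; };

    static void get_task(struct task *t) { t->refcnt++; }

    static struct task *pick_swap_candidate(struct task *rq_curr)
    {
            struct task *cur = rq_curr;

            /* An exiting or idle task counts as "no task": moving onto
             * its CPU is a plain migration, not a swap. */
            if (cur && (cur->exiting || cur->idle))
                    cur = NULL;

            if (cur)
                    get_task(cur);  /* pin before dropping the run-queue lock */
            return cur;
    }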
D | cpufreq_schedutil.c |
    150  policy->cur = next_freq;  in sugov_update_commit()
    185  policy->cpuinfo.max_freq : policy->cur;  in get_next_freq()
|
D | core.c |
    1405  int migrate_swap(struct task_struct *cur, struct task_struct *p)  in migrate_swap() argument
    1411  .src_task = cur,  in migrate_swap()
    1412  .src_cpu = task_cpu(cur),  in migrate_swap()
    1433  trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);  in migrate_swap()
    5200  int cpuset_cpumask_can_shrink(const struct cpumask *cur,  in cpuset_cpumask_can_shrink() argument
    5207  if (!cpumask_weight(cur))  in cpuset_cpumask_can_shrink()
    5211  cur_dl_b = dl_bw_of(cpumask_any(cur));  in cpuset_cpumask_can_shrink()
    7521  static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,  in dattrs_equal() argument
    7527  if (!new && !cur)  in dattrs_equal()
    7531  return !memcmp(cur ? (cur + idx_cur) : &tmp,  in dattrs_equal()
|
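The dattrs_equal() hits show a compact NULL-tolerant comparison: a NULL sched_domain_attr array means "all defaults", so a default-initialized temporary is substituted for whichever side is missing and both sides go through a single memcmp(). A sketch with an illustrative one-field attribute struct:

    #include <string.h>

    struct sched_domain_attr { int relax_domain_level; };

    #define SD_ATTR_INIT \
            ((struct sched_domain_attr){ .relax_domain_level = -1 })

    static int attrs_equal(struct sched_domain_attr *cur, int idx_cur,
                           struct sched_domain_attr *new, int idx_new)
    {
            struct sched_domain_attr tmp = SD_ATTR_INIT;

            if (!new && !cur)               /* both sides are "all defaults" */
                    return 1;

            /* A missing array compares as the default template. */
            return !memcmp(cur ? (cur + idx_cur) : &tmp,
                           new ? (new + idx_new) : &tmp,
                           sizeof(struct sched_domain_attr));
    }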
/kernel/bpf/
D | verifier.c |
    1698  static bool states_equal(struct verifier_state *old, struct verifier_state *cur)  in states_equal() argument
    1703  if (memcmp(&old->regs[i], &cur->regs[i],  in states_equal()
    1707  cur->regs[i].type != NOT_INIT))  in states_equal()
    1716  if (old->stack_slot_type[i] != cur->stack_slot_type[i])  in states_equal()
    1726  &cur->spilled_regs[i / BPF_REG_SIZE],  in states_equal()
|
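states_equal() asks whether an already-verified state old subsumes the state cur now being explored: a register that matches byte-for-byte is fine, and a register the old state never initialized imposes no constraint. A reduced sketch with two-field registers; the real verifier has more escape cases, and the memcmp() here assumes zero-initialized structs so padding bytes compare equal:

    #include <stdbool.h>
    #include <string.h>

    enum reg_type { NOT_INIT, SCALAR, PTR };

    struct reg_state { enum reg_type type; long value; };

    #define NR_REGS 11

    static bool states_equal(struct reg_state *old, struct reg_state *cur)
    {
            for (int i = 0; i < NR_REGS; i++) {
                    if (memcmp(&old[i], &cur[i], sizeof(old[i])) == 0)
                            continue;       /* identical register */
                    if (old[i].type == NOT_INIT)
                            continue;       /* old state was less strict */
                    return false;           /* cur is not covered by old */
            }
            return true;
    }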
/kernel/rcu/
D | tree_plugin.h |
    2735  int cur;  in rcu_sysidle_check_cpu() local
    2755  cur = atomic_read(&rdtp->dynticks_idle);  in rcu_sysidle_check_cpu()
    2756  if (cur & 0x1) {  in rcu_sysidle_check_cpu()
|
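rcu_sysidle_check_cpu() relies on the dynticks parity convention: the per-CPU counter is incremented on every idle-enter and idle-exit, so an odd snapshot means the CPU is currently non-idle and the full-system-idle scan can stop early. A userspace sketch of the parity test, with C11 atomics standing in for the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Counter convention (as in RCU's dynticks_idle): even while the
     * CPU sits in idle, odd while it is running non-idle code. */
    static bool cpu_seems_idle(atomic_int *dynticks_idle)
    {
            int cur = atomic_load(dynticks_idle);

            return !(cur & 0x1);    /* odd => mid non-idle section */
    }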