
Searched refs:cur (Results 1 – 16 of 16) sorted by relevance

/kernel/
kexec_core.c 1104 char *cur = cmdline, *tmp; in parse_crashkernel_mem() local
1111 start = memparse(cur, &tmp); in parse_crashkernel_mem()
1112 if (cur == tmp) { in parse_crashkernel_mem()
1116 cur = tmp; in parse_crashkernel_mem()
1117 if (*cur != '-') { in parse_crashkernel_mem()
1121 cur++; in parse_crashkernel_mem()
1124 if (*cur != ':') { in parse_crashkernel_mem()
1125 end = memparse(cur, &tmp); in parse_crashkernel_mem()
1126 if (cur == tmp) { in parse_crashkernel_mem()
1130 cur = tmp; in parse_crashkernel_mem()
[all …]
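
The parse_crashkernel_mem() hits above show the memparse() cursor idiom: each call parses a number (with an optional K/M/G suffix) and advances tmp past the consumed characters, so cur == tmp afterwards means nothing was parsed. A minimal userspace sketch of one "start-end:size" range, with a hypothetical simple_memparse() standing in for the kernel helper (the real parser also handles comma-separated lists of ranges):

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's memparse(): parse a number with
 * an optional K/M/G suffix and advance *retptr past what was consumed. */
static unsigned long long simple_memparse(const char *ptr, char **retptr)
{
    unsigned long long val = strtoull(ptr, retptr, 0);

    switch (**retptr) {
    case 'G': case 'g': val <<= 10; /* fall through */
    case 'M': case 'm': val <<= 10; /* fall through */
    case 'K': case 'k': val <<= 10; (*retptr)++; break;
    }
    return val;
}

/* Parse one "start-end:size" range, mirroring the cur/tmp idiom:
 * cur == tmp after a call means no characters were consumed. */
static int parse_range(const char *cmdline,
                       unsigned long long *start, unsigned long long *end,
                       unsigned long long *size)
{
    char *cur = (char *)cmdline, *tmp;

    *start = simple_memparse(cur, &tmp);
    if (cur == tmp)
        return -1;              /* no start value */
    cur = tmp;
    if (*cur != '-')
        return -1;              /* expected "start-end" */
    cur++;
    *end = simple_memparse(cur, &tmp);
    if (cur == tmp || *end <= *start)
        return -1;
    cur = tmp;
    if (*cur != ':')
        return -1;
    cur++;
    *size = simple_memparse(cur, &tmp);
    return (cur == tmp) ? -1 : 0;
}

int main(void)
{
    unsigned long long s, e, sz;
    if (parse_range("512M-2G:64M", &s, &e, &sz) == 0)
        printf("start=%llu end=%llu size=%llu\n", s, e, sz);
    return 0;
}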
smpboot.c 213 struct smp_hotplug_thread *cur; in smpboot_create_threads() local
217 list_for_each_entry(cur, &hotplug_threads, list) { in smpboot_create_threads()
218 ret = __smpboot_create_thread(cur, cpu); in smpboot_create_threads()
236 struct smp_hotplug_thread *cur; in smpboot_unpark_threads() local
239 list_for_each_entry(cur, &hotplug_threads, list) in smpboot_unpark_threads()
240 if (cpumask_test_cpu(cpu, cur->cpumask)) in smpboot_unpark_threads()
241 smpboot_unpark_thread(cur, cpu); in smpboot_unpark_threads()
256 struct smp_hotplug_thread *cur; in smpboot_park_threads() local
259 list_for_each_entry_reverse(cur, &hotplug_threads, list) in smpboot_park_threads()
260 smpboot_park_thread(cur, cpu); in smpboot_park_threads()
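
smpboot.c walks the global hotplug_threads list with list_for_each_entry() (and the _reverse variant when parking, so teardown mirrors creation order). A self-contained sketch of the container_of-based iteration, using a hypothetical hotplug_thread struct in place of smp_hotplug_thread and GNU C typeof, as the kernel headers do:

#include <stdio.h>
#include <stddef.h>

/* Minimal intrusive list, in the style of <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, head, member)                       \
    for (pos = container_of((head)->next, typeof(*pos), member);     \
         &pos->member != (head);                                     \
         pos = container_of(pos->member.next, typeof(*pos), member))

/* Hypothetical stand-in for struct smp_hotplug_thread. */
struct hotplug_thread {
    const char *name;
    struct list_head list;
};

static void list_add_tail(struct list_head *new, struct list_head *head)
{
    new->prev = head->prev;
    new->next = head;
    head->prev->next = new;
    head->prev = new;
}

int main(void)
{
    struct list_head hotplug_threads = LIST_HEAD_INIT(hotplug_threads);
    struct hotplug_thread a = { "ksoftirqd" }, b = { "migration" };
    struct hotplug_thread *cur;

    list_add_tail(&a.list, &hotplug_threads);
    list_add_tail(&b.list, &hotplug_threads);

    /* Same shape as smpboot_create_threads(): visit each registered
     * thread descriptor and act on it for the given CPU. */
    list_for_each_entry(cur, &hotplug_threads, list)
        printf("create %s\n", cur->name);
    return 0;
}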
cpuset.c 468 static int validate_change(struct cpuset *cur, struct cpuset *trial) in validate_change() argument
478 cpuset_for_each_child(c, css, cur) in validate_change()
484 if (cur == &top_cpuset) in validate_change()
487 par = parent_cs(cur); in validate_change()
502 c != cur && in validate_change()
506 c != cur && in validate_change()
516 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { in validate_change()
517 if (!cpumask_empty(cur->cpus_allowed) && in validate_change()
520 if (!nodes_empty(cur->mems_allowed) && in validate_change()
530 if (is_cpu_exclusive(cur) && in validate_change()
[all …]
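
validate_change() compares the trial cpuset against each sibling: if either of two distinct siblings is CPU-exclusive, their masks must not overlap. A toy version of that pairwise rule, with a plain bitmask standing in for a cpumask (toy_cpuset and siblings_ok are illustrative names):

#include <stdbool.h>
#include <stdio.h>

/* Toy cpuset: a CPU bitmask plus the cpu_exclusive flag. */
struct toy_cpuset {
    unsigned long cpus_allowed;   /* bit n = CPU n */
    bool cpu_exclusive;
};

/* Mirror of the sibling check in validate_change(): if either of two
 * distinct siblings is exclusive, their CPU masks must not overlap. */
static bool siblings_ok(const struct toy_cpuset *trial,
                        const struct toy_cpuset *siblings, int n)
{
    for (int i = 0; i < n; i++) {
        const struct toy_cpuset *c = &siblings[i];
        if ((trial->cpu_exclusive || c->cpu_exclusive) &&
            (trial->cpus_allowed & c->cpus_allowed))
            return false;
    }
    return true;
}

int main(void)
{
    struct toy_cpuset sibs[] = { { 0x3, true } };   /* CPUs 0-1, exclusive */
    struct toy_cpuset trial = { 0x2, false };       /* CPU 1 overlaps */
    printf("%s\n", siblings_ok(&trial, sibs, 1) ? "ok" : "rejected");
    return 0;
}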
kprobes.c 1035 struct kprobe *cur = __this_cpu_read(kprobe_instance); in aggr_fault_handler() local
1041 if (cur && cur->fault_handler) { in aggr_fault_handler()
1042 if (cur->fault_handler(cur, regs, trapnr)) in aggr_fault_handler()
1051 struct kprobe *cur = __this_cpu_read(kprobe_instance); in aggr_break_handler() local
1054 if (cur && cur->break_handler) { in aggr_break_handler()
1055 if (cur->break_handler(cur, regs)) in aggr_break_handler()
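
Both aggregate handlers read the per-CPU kprobe_instance and only call through the optional callback when the probe and its handler are both non-NULL. The guard, sketched without the per-CPU machinery (dispatch_fault is a made-up name):

#include <stdio.h>

struct regs;                          /* opaque register snapshot */

struct probe {
    /* Optional callback: returns nonzero if it handled the fault. */
    int (*fault_handler)(struct probe *p, struct regs *r, int trapnr);
};

/* Same guard as aggr_fault_handler(): only call through the pointer
 * when both the probe and its handler are present. */
static int dispatch_fault(struct probe *cur, struct regs *regs, int trapnr)
{
    if (cur && cur->fault_handler)
        return cur->fault_handler(cur, regs, trapnr);
    return 0;                         /* not handled */
}

static int my_handler(struct probe *p, struct regs *r, int trapnr)
{
    (void)p; (void)r;
    printf("handled trap %d\n", trapnr);
    return 1;
}

int main(void)
{
    struct probe p = { my_handler };
    dispatch_fault(&p, NULL, 14);
    dispatch_fault(NULL, NULL, 14);   /* safely ignored */
    return 0;
}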
cpu.c 1825 ssize_t cur, res = 0; in show_cpuhp_states() local
1833 cur = sprintf(buf, "%3d: %s\n", i, sp->name); in show_cpuhp_states()
1834 buf += cur; in show_cpuhp_states()
1835 res += cur; in show_cpuhp_states()
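
show_cpuhp_states() appends one line per state, advancing buf by each sprintf() return value and summing the total into res. The same accumulation in userspace, made bounds-safe with snprintf() (the kernel side can rely on the PAGE_SIZE sysfs buffer):

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
    const char *names[] = { "offline", "threads:create", "online" };
    char page[256], *buf = page;
    size_t remaining = sizeof(page);
    ssize_t cur, res = 0;

    for (int i = 0; i < 3; i++) {
        /* Append "%3d: name\n", advance the cursor, total the length. */
        cur = snprintf(buf, remaining, "%3d: %s\n", i, names[i]);
        if (cur < 0 || (size_t)cur >= remaining)
            break;                    /* buffer exhausted */
        buf += cur;
        remaining -= cur;
        res += cur;
    }
    fputs(page, stdout);
    printf("total: %zd bytes\n", res);
    return 0;
}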
/kernel/power/
snapshot.c 380 struct bm_position cur; /* most recently used bit position */ member
548 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree, in memory_bm_position_reset()
550 bm->cur.node = list_entry(bm->cur.zone->leaves.next, in memory_bm_position_reset()
552 bm->cur.node_pfn = 0; in memory_bm_position_reset()
553 bm->cur.node_bit = 0; in memory_bm_position_reset()
593 struct mem_extent *ext, *cur, *aux; in create_mem_extents() local
624 cur = ext; in create_mem_extents()
625 list_for_each_entry_safe_continue(cur, aux, list, hook) { in create_mem_extents()
626 if (zone_end < cur->start) in create_mem_extents()
628 if (zone_end < cur->end) in create_mem_extents()
[all …]
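
create_mem_extents() keeps the extent list sorted by start address and folds each zone's span into any extents it overlaps, deleting the ones it swallows (that is what the list_for_each_entry_safe_continue() walk is doing). The same merge rule sketched over a sorted array instead of a linked list (merge_extent is an illustrative name):

#include <stdio.h>

struct mem_extent { unsigned long start, end; };   /* [start, end) */

/* Fold a new span into a sorted, non-overlapping extent array and
 * return the new count, or -1 if an insert would exceed capacity. */
static int merge_extent(struct mem_extent *ext, int n, int cap,
                        unsigned long start, unsigned long end)
{
    int i = 0, j;

    /* Skip extents entirely before the new span. */
    while (i < n && ext[i].end < start)
        i++;

    if (i == n || end < ext[i].start) {
        /* No overlap: insert a fresh extent at position i. */
        if (n == cap)
            return -1;
        for (j = n; j > i; j--)
            ext[j] = ext[j - 1];
        ext[i] = (struct mem_extent){ start, end };
        return n + 1;
    }

    /* Overlap: widen ext[i], then swallow any later extents covered. */
    if (start < ext[i].start)
        ext[i].start = start;
    if (end > ext[i].end)
        ext[i].end = end;
    j = i + 1;
    while (j < n && ext[j].start <= ext[i].end) {
        if (ext[j].end > ext[i].end)
            ext[i].end = ext[j].end;
        j++;
    }
    /* Close the gap left by swallowed extents. */
    int swallowed = j - (i + 1);
    for (int k = j; k < n; k++)
        ext[k - swallowed] = ext[k];
    return n - swallowed;
}

int main(void)
{
    struct mem_extent ext[8] = { { 0, 10 }, { 20, 30 } };
    int n = 2;

    n = merge_extent(ext, n, 8, 8, 25);   /* bridges the two extents */
    for (int i = 0; i < n; i++)
        printf("[%lu, %lu)\n", ext[i].start, ext[i].end);
    return 0;
}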
swap.c 96 struct swap_map_page *cur; member
405 if (handle->cur) in release_swap_writer()
406 free_page((unsigned long)handle->cur); in release_swap_writer()
407 handle->cur = NULL; in release_swap_writer()
421 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); in get_swap_writer()
422 if (!handle->cur) { in get_swap_writer()
448 if (!handle->cur) in swap_write_page()
454 handle->cur->entries[handle->k++] = offset; in swap_write_page()
459 handle->cur->next_swap = offset; in swap_write_page()
460 error = write_page(handle->cur, handle->cur_swap, hb); in swap_write_page()
[all …]
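
swap_write_page() stores each data page's swap offset into the current swap_map_page; when the page fills up, next_swap is pointed at a freshly allocated slot and the full page is flushed. A sketch of that chained-index pattern with a tiny in-memory page and a fake allocator (map_write and alloc_offset are made up; the real code writes the page to disk and handles errors and sync):

#include <stdio.h>

#define MAP_PAGE_ENTRIES 4            /* tiny for the demo; the kernel
                                       * fits roughly a page of entries */

struct swap_map_page {
    unsigned long entries[MAP_PAGE_ENTRIES];
    unsigned long next_swap;          /* offset of the next map page */
};

struct swap_map_handle {
    struct swap_map_page *cur;
    int k;                            /* next free slot in cur */
};

static unsigned long alloc_offset(void)   /* stand-in swap allocator */
{
    static unsigned long next = 100;
    return next++;
}

/* Record one data-page offset; chain to a new map page when full. */
static int map_write(struct swap_map_handle *handle, unsigned long offset)
{
    if (!handle->cur)
        return -1;
    handle->cur->entries[handle->k++] = offset;
    if (handle->k == MAP_PAGE_ENTRIES) {
        unsigned long next = alloc_offset();
        handle->cur->next_swap = next;
        printf("flush map page -> next at %lu\n", next);
        /* The kernel writes handle->cur out here; we just reset it. */
        *handle->cur = (struct swap_map_page){ 0 };
        handle->k = 0;
    }
    return 0;
}

int main(void)
{
    struct swap_map_page page = { 0 };
    struct swap_map_handle handle = { &page, 0 };

    for (int i = 0; i < 6; i++)
        map_write(&handle, alloc_offset());
    return 0;
}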
power.h 136 unsigned int cur; /* number of the block of PAGE_SIZE bytes the member
/kernel/locking/
mutex.c 161 struct mutex_waiter *cur; in ww_mutex_set_context_fastpath() local
187 list_for_each_entry(cur, &lock->base.wait_list, list) { in ww_mutex_set_context_fastpath()
188 debug_mutex_wake_waiter(&lock->base, cur); in ww_mutex_set_context_fastpath()
189 wake_up_process(cur->task); in ww_mutex_set_context_fastpath()
204 struct mutex_waiter *cur; in ww_mutex_set_context_slowpath() local
213 list_for_each_entry(cur, &lock->base.wait_list, list) { in ww_mutex_set_context_slowpath()
214 debug_mutex_wake_waiter(&lock->base, cur); in ww_mutex_set_context_slowpath()
215 wake_up_process(cur->task); in ww_mutex_set_context_slowpath()
/kernel/time/
tick-broadcast.c 91 struct clock_event_device *cur = tick_broadcast_device.evtdev; in tick_install_broadcast_device() local
93 if (!tick_check_broadcast_device(cur, dev)) in tick_install_broadcast_device()
99 clockevents_exchange_device(cur, dev); in tick_install_broadcast_device()
100 if (cur) in tick_install_broadcast_device()
101 cur->event_handler = clockevents_handle_noop; in tick_install_broadcast_device()
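
tick_install_broadcast_device() swaps in the better clock_event_device and points the old device's event_handler at a no-op, so a stray interrupt from the displaced device is harmless. The swap-and-neutralize idiom, with a plain rating comparison standing in for the fuller tick_check_broadcast_device() criteria:

#include <stdio.h>

struct clock_event_device {
    const char *name;
    int rating;
    void (*event_handler)(struct clock_event_device *);
};

static void handle_noop(struct clock_event_device *dev) { (void)dev; }

/* Install `dev` if it beats the current device; neutralize the old
 * handler so a late event on the old device does nothing. */
static void install_broadcast_device(struct clock_event_device **cur_p,
                                     struct clock_event_device *dev)
{
    struct clock_event_device *cur = *cur_p;

    if (cur && cur->rating >= dev->rating)
        return;                       /* current device is good enough */
    *cur_p = dev;
    if (cur)
        cur->event_handler = handle_noop;
}

int main(void)
{
    struct clock_event_device hpet  = { "hpet",  50, NULL };
    struct clock_event_device lapic = { "lapic", 100, NULL };
    struct clock_event_device *cur = &hpet;

    install_broadcast_device(&cur, &lapic);
    printf("broadcast device: %s\n", cur->name);
    return 0;
}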
/kernel/bpf/
verifier.c 2651 struct bpf_reg_state *cur) in compare_ptrs_to_packet() argument
2653 if (old->id != cur->id) in compare_ptrs_to_packet()
2664 if (old->off == cur->off && old->range < cur->range) in compare_ptrs_to_packet()
2693 if (old->off <= cur->off && in compare_ptrs_to_packet()
2694 old->off >= old->range && cur->off >= cur->range) in compare_ptrs_to_packet()
2728 struct bpf_verifier_state *cur) in states_equal() argument
2736 rcur = &cur->regs[i]; in states_equal()
2774 if (old->stack_slot_type[i] != cur->stack_slot_type[i]) in states_equal()
2784 &cur->spilled_regs[i / BPF_REG_SIZE], in states_equal()
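
states_equal() lets the verifier prune a branch when the current state is at least as constrained as an already-verified one, register by register. A drastically simplified model of that subsumption loop (reg_state and states_subsume are invented here; the real code also compares stack slot types, spilled registers, and packet ranges):

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#define MAX_REGS 11                   /* BPF has r0-r10 */

struct reg_state {
    int type;                         /* e.g. NOT_INIT, PTR, SCALAR */
    long min_value, max_value;        /* known value bounds */
};

/* The old state subsumes cur when every register in cur is at least
 * as tightly bounded as its already-verified counterpart. */
static bool states_subsume(const struct reg_state *old,
                           const struct reg_state *cur)
{
    for (int i = 0; i < MAX_REGS; i++) {
        const struct reg_state *rold = &old[i], *rcur = &cur[i];

        if (memcmp(rold, rcur, sizeof(*rold)) == 0)
            continue;                 /* identical: trivially safe */
        if (rold->type != rcur->type)
            return false;
        if (rcur->min_value < rold->min_value ||
            rcur->max_value > rold->max_value)
            return false;             /* cur is looser than old */
    }
    return true;
}

int main(void)
{
    struct reg_state old[MAX_REGS] = { { 1, 0, 100 } };
    struct reg_state cur[MAX_REGS] = { { 1, 10, 50 } };
    printf("%s\n", states_subsume(old, cur) ? "prune" : "explore");
    return 0;
}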
syscall.c 82 unsigned long memlock_limit, cur; in bpf_map_precharge_memlock() local
85 cur = atomic_long_read(&user->locked_vm); in bpf_map_precharge_memlock()
87 if (cur + pages > memlock_limit) in bpf_map_precharge_memlock()
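
bpf_map_precharge_memlock() reads the user's current locked_vm and refuses the map if charging its pages would exceed the RLIMIT_MEMLOCK-derived limit. The bound check in isolation (precharge_memlock is an illustrative wrapper; the kernel reads cur from an atomic and derives the limit from rlimit(RLIMIT_MEMLOCK) in pages):

#include <stdio.h>

/* Reject if charging `pages` would push the user past the limit. */
static int precharge_memlock(unsigned long cur, unsigned long pages,
                             unsigned long memlock_limit)
{
    if (cur + pages > memlock_limit)
        return -1;                    /* -EPERM in the kernel */
    return 0;
}

int main(void)
{
    printf("%d\n", precharge_memlock(900, 200, 1000));   /* -1 */
    printf("%d\n", precharge_memlock(100, 200, 1000));   /*  0 */
    return 0;
}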
/kernel/sched/
fair.c 1531 struct task_struct *cur; in task_numa_compare() local
1539 cur = task_rcu_dereference(&dst_rq->curr); in task_numa_compare()
1540 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur))) in task_numa_compare()
1541 cur = NULL; in task_numa_compare()
1547 if (cur == env->p) in task_numa_compare()
1557 if (cur) { in task_numa_compare()
1559 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur))) in task_numa_compare()
1566 if (cur->numa_group == env->p->numa_group) { in task_numa_compare()
1567 imp = taskimp + task_weight(cur, env->src_nid, dist) - in task_numa_compare()
1568 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
[all …]
cpufreq_schedutil.c 149 policy->cur = next_freq; in sugov_update_commit()
184 policy->cpuinfo.max_freq : policy->cur; in get_next_freq()
core.c 1355 int migrate_swap(struct task_struct *cur, struct task_struct *p) in migrate_swap() argument
1361 .src_task = cur, in migrate_swap()
1362 .src_cpu = task_cpu(cur), in migrate_swap()
1383 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); in migrate_swap()
5435 int cpuset_cpumask_can_shrink(const struct cpumask *cur, in cpuset_cpumask_can_shrink() argument
5442 if (!cpumask_weight(cur)) in cpuset_cpumask_can_shrink()
5446 cur_dl_b = dl_bw_of(cpumask_any(cur)); in cpuset_cpumask_can_shrink()
7387 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, in dattrs_equal() argument
7393 if (!new && !cur) in dattrs_equal()
7397 return !memcmp(cur ? (cur + idx_cur) : &tmp, in dattrs_equal()
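
dattrs_equal() treats a NULL attribute array as "all defaults": rather than dereferencing NULL, it memcmp()s against a default-initialized tmp. A compilable version of the same function:

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

struct sched_domain_attr { int relax_domain_level; };

#define SD_ATTR_INIT ((struct sched_domain_attr){ .relax_domain_level = -1 })

/* NULL-tolerant comparison: a missing array means "defaults", so
 * compare against a default-initialized tmp instead of dereferencing. */
static bool dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
                         struct sched_domain_attr *new, int idx_new)
{
    struct sched_domain_attr tmp = SD_ATTR_INIT;

    if (!new && !cur)
        return true;
    return !memcmp(cur ? (cur + idx_cur) : &tmp,
                   new ? (new + idx_new) : &tmp,
                   sizeof(struct sched_domain_attr));
}

int main(void)
{
    struct sched_domain_attr a[] = { { -1 } };
    printf("%d\n", dattrs_equal(a, 0, NULL, 0));   /* 1: matches default */
    return 0;
}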
/kernel/rcu/
tree_plugin.h 2640 int cur; in rcu_sysidle_check_cpu() local
2660 cur = atomic_read(&rdtp->dynticks_idle); in rcu_sysidle_check_cpu()
2661 if (cur & 0x1) { in rcu_sysidle_check_cpu()
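
rcu_sysidle_check_cpu() snapshots the per-CPU dynticks_idle counter, which is incremented on every idle entry and exit; under that encoding an odd value means the CPU is currently non-idle, which is all the cur & 0x1 test checks. A toy model of the parity convention using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Per-CPU idle-transition counter: incremented on every idle entry
 * and exit, so an odd snapshot means "currently non-idle", matching
 * the cur & 0x1 test above. */
static atomic_int dynticks_idle = 1;  /* boot CPU starts non-idle */

static void idle_enter(void) { atomic_fetch_add(&dynticks_idle, 1); }
static void idle_exit(void)  { atomic_fetch_add(&dynticks_idle, 1); }

static bool cpu_is_idle(void)
{
    int cur = atomic_load(&dynticks_idle);
    return !(cur & 0x1);
}

int main(void)
{
    printf("idle? %d\n", cpu_is_idle());   /* 0: odd counter, running */
    idle_enter();
    printf("idle? %d\n", cpu_is_idle());   /* 1: even counter, idle */
    idle_exit();
    return 0;
}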