Searched refs:tmp (Results 1 – 25 of 75) sorted by relevance

/kernel/
delayacct.c
90 s64 tmp; in __delayacct_add_tsk() local
93 tmp = (s64)d->cpu_run_real_total; in __delayacct_add_tsk()
94 tmp += utime + stime; in __delayacct_add_tsk()
95 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; in __delayacct_add_tsk()
98 tmp = (s64)d->cpu_scaled_run_real_total; in __delayacct_add_tsk()
99 tmp += utimescaled + stimescaled; in __delayacct_add_tsk()
101 (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp; in __delayacct_add_tsk()
113 tmp = (s64)d->cpu_delay_total + t2; in __delayacct_add_tsk()
114 d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp; in __delayacct_add_tsk()
116 tmp = (s64)d->cpu_run_virtual_total + t3; in __delayacct_add_tsk()
[all …]
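
The delayacct.c hits all follow one pattern: accumulate into a signed 64-bit temporary, and reset the counter to 0 if the addition overflowed past the old value. A minimal user-space sketch of that overflow-guarded accumulation (the typedefs and the counter name are illustrative, not the kernel's struct taskstats):

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t s64;
    typedef uint64_t u64;

    /* Add delta to *total; if the signed temporary overflowed past the old
     * value, reset the counter to 0 rather than storing a wrapped sum. */
    static void saturating_add(u64 *total, u64 delta)
    {
        s64 tmp = (s64)*total;

        tmp += delta;
        *total = (tmp < (s64)*total) ? 0 : tmp;
    }

    int main(void)
    {
        u64 cpu_run_real_total = (u64)INT64_MAX - 5;

        saturating_add(&cpu_run_real_total, 100);   /* overflows, resets to 0 */
        printf("%llu\n", (unsigned long long)cpu_run_real_total);
        return 0;
    }
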
resource.c
189 struct resource *tmp, **p; in __request_resource() local
199 tmp = *p; in __request_resource()
200 if (!tmp || tmp->start > end) { in __request_resource()
201 new->sibling = tmp; in __request_resource()
206 p = &tmp->sibling; in __request_resource()
207 if (tmp->end < start) in __request_resource()
209 return tmp; in __request_resource()
215 struct resource *tmp, **p, *chd; in __release_resource() local
219 tmp = *p; in __release_resource()
220 if (!tmp) in __release_resource()
[all …]
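
__request_resource() in resource.c is the classic pointer-to-pointer walk over a sorted sibling list: link the new range in at the first slot that fits, or return the entry it collides with. A self-contained sketch of that insertion logic (the struct below is a simplified stand-in for struct resource):

    #include <stddef.h>
    #include <stdio.h>

    struct range {
        unsigned long start, end;   /* inclusive bounds */
        struct range *sibling;      /* next range, sorted by start */
    };

    /* Insert new into the sorted list at *head.
     * Returns NULL on success, or the conflicting entry on overlap. */
    static struct range *request_range(struct range **head, struct range *new)
    {
        struct range **p = head, *tmp;

        for (;;) {
            tmp = *p;
            if (!tmp || tmp->start > new->end) {
                new->sibling = tmp;     /* slot found: link in */
                *p = new;
                return NULL;
            }
            p = &tmp->sibling;
            if (tmp->end < new->start)
                continue;               /* lies entirely before us */
            return tmp;                 /* overlap: report the conflict */
        }
    }

    int main(void)
    {
        struct range a = { 0x100, 0x1ff, NULL }, b = { 0x300, 0x3ff, NULL };
        struct range c = { 0x180, 0x2ff, NULL };
        struct range *head = NULL;

        request_range(&head, &a);
        request_range(&head, &b);
        printf("conflict with a: %d\n", request_range(&head, &c) == &a);
        return 0;
    }
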
crash_core.c
43 char *cur = cmdline, *tmp; in parse_crashkernel_mem() local
50 start = memparse(cur, &tmp); in parse_crashkernel_mem()
51 if (cur == tmp) { in parse_crashkernel_mem()
55 cur = tmp; in parse_crashkernel_mem()
64 end = memparse(cur, &tmp); in parse_crashkernel_mem()
65 if (cur == tmp) { in parse_crashkernel_mem()
69 cur = tmp; in parse_crashkernel_mem()
82 size = memparse(cur, &tmp); in parse_crashkernel_mem()
83 if (cur == tmp) { in parse_crashkernel_mem()
87 cur = tmp; in parse_crashkernel_mem()
[all …]
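
parse_crashkernel_mem() uses memparse()'s second argument the way strtoul() uses endptr: if the cursor did not advance (cur == tmp), nothing was parsed and the option is rejected. A user-space sketch of that did-the-parser-consume-anything check, with strtoull standing in for the kernel-only memparse():

    #include <stdlib.h>
    #include <stdio.h>

    /* Parse "start-end" out of a crashkernel=-style option.
     * Returns 0 on success, -1 if either number is missing. */
    static int parse_range(const char *cur, unsigned long long *start,
                           unsigned long long *end)
    {
        char *tmp;

        *start = strtoull(cur, &tmp, 0);
        if (cur == tmp)             /* cursor did not move: no digits */
            return -1;
        cur = tmp;

        if (*cur++ != '-')
            return -1;

        *end = strtoull(cur, &tmp, 0);
        if (cur == tmp)
            return -1;
        return 0;
    }

    int main(void)
    {
        unsigned long long s, e;

        printf("%d\n", parse_range("0x1000-0x2000", &s, &e));  /* 0  */
        printf("%d\n", parse_range("junk-0x2000", &s, &e));    /* -1 */
        return 0;
    }
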
pid.c
165 struct pid_namespace *tmp; in alloc_pid() local
184 tmp = ns; in alloc_pid()
200 if (tid != 1 && !tmp->child_reaper) in alloc_pid()
203 if (!checkpoint_restore_ns_capable(tmp->user_ns)) in alloc_pid()
212 nr = idr_alloc(&tmp->idr, NULL, tid, in alloc_pid()
226 if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS) in alloc_pid()
233 nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min, in alloc_pid()
245 pid->numbers[i].ns = tmp; in alloc_pid()
246 tmp = tmp->parent; in alloc_pid()
342 int tmp; in __change_pid() local
[all …]
sys.c
994 struct tms tmp; in SYSCALL_DEFINE1() local
996 do_sys_times(&tmp); in SYSCALL_DEFINE1()
997 if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) in SYSCALL_DEFINE1()
1014 struct compat_tms tmp; in COMPAT_SYSCALL_DEFINE1() local
1018 tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime); in COMPAT_SYSCALL_DEFINE1()
1019 tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime); in COMPAT_SYSCALL_DEFINE1()
1020 tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime); in COMPAT_SYSCALL_DEFINE1()
1021 tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime); in COMPAT_SYSCALL_DEFINE1()
1022 if (copy_to_user(tbuf, &tmp, sizeof(tmp))) in COMPAT_SYSCALL_DEFINE1()
1276 struct new_utsname tmp; in SYSCALL_DEFINE1() local
[all …]
sysctl.c
209 struct ctl_table tmp = { in bpf_stats_handler() local
222 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); in bpf_stats_handler()
243 struct ctl_table tmp = *table; in bpf_unpriv_handler() local
248 tmp.data = &unpriv_enable; in bpf_unpriv_handler()
249 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); in bpf_unpriv_handler()
467 char *p, tmp[TMPBUFLEN]; in proc_get_long() local
476 memcpy(tmp, *buf, len); in proc_get_long()
478 tmp[len] = 0; in proc_get_long()
479 p = tmp; in proc_get_long()
491 len = p - tmp; in proc_get_long()
[all …]
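
proc_get_long() in sysctl.c cannot parse the user buffer in place, so it copies at most TMPBUFLEN-1 bytes into a stack temporary, NUL-terminates it, and measures what the parser consumed with p - tmp. A user-space sketch of that bounded-copy-then-parse idiom (the buffer size and helper name here are illustrative):

    #include <string.h>
    #include <stdlib.h>
    #include <stdio.h>

    #define TMPBUFLEN 22    /* room for a 64-bit decimal value, sign and NUL */

    /* Parse a long out of an unterminated buffer of length len.
     * Returns the number of bytes consumed, 0 if nothing parsed. */
    static size_t get_long(const char *buf, size_t len, long *val)
    {
        char tmp[TMPBUFLEN], *p;

        if (len > sizeof(tmp) - 1)
            len = sizeof(tmp) - 1;
        memcpy(tmp, buf, len);      /* bounded copy of the raw input */
        tmp[len] = 0;               /* make it a proper C string */

        *val = strtol(tmp, &p, 10);
        return p - tmp;             /* bytes the parser actually consumed */
    }

    int main(void)
    {
        const char raw[] = { '4', '2', ' ', 'x' };  /* not NUL-terminated */
        long v;
        size_t used = get_long(raw, sizeof(raw), &v);

        printf("val=%ld consumed=%zu\n", v, used);  /* val=42 consumed=2 */
        return 0;
    }
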
fork.c
484 struct vm_area_struct *mpnt, *tmp, *prev, **pprev, *last = NULL; in dup_mmap() local
544 tmp = vm_area_dup(mpnt); in dup_mmap()
545 if (!tmp) in dup_mmap()
547 retval = vma_dup_policy(mpnt, tmp); in dup_mmap()
550 tmp->vm_mm = mm; in dup_mmap()
551 retval = dup_userfaultfd(tmp, &uf); in dup_mmap()
554 if (tmp->vm_flags & VM_WIPEONFORK) { in dup_mmap()
560 tmp->anon_vma = NULL; in dup_mmap()
561 } else if (anon_vma_fork(tmp, mpnt)) in dup_mmap()
563 tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); in dup_mmap()
[all …]
acct.c
281 struct filename *tmp = getname(name); in SYSCALL_DEFINE1() local
283 if (IS_ERR(tmp)) in SYSCALL_DEFINE1()
284 return PTR_ERR(tmp); in SYSCALL_DEFINE1()
286 error = acct_on(tmp); in SYSCALL_DEFINE1()
288 putname(tmp); in SYSCALL_DEFINE1()
/kernel/power/
console.c
48 struct pm_vt_switch *entry, *tmp; in pm_vt_switch_required() local
51 list_for_each_entry(tmp, &pm_vt_switch_list, head) { in pm_vt_switch_required()
52 if (tmp->dev == dev) { in pm_vt_switch_required()
54 tmp->required = required; in pm_vt_switch_required()
80 struct pm_vt_switch *tmp; in pm_vt_switch_unregister() local
83 list_for_each_entry(tmp, &pm_vt_switch_list, head) { in pm_vt_switch_unregister()
84 if (tmp->dev == dev) { in pm_vt_switch_unregister()
85 list_del(&tmp->head); in pm_vt_switch_unregister()
86 kfree(tmp); in pm_vt_switch_unregister()
swap.c
959 struct swap_map_page_list *tmp; in release_swap_reader() local
964 tmp = handle->maps; in release_swap_reader()
966 kfree(tmp); in release_swap_reader()
975 struct swap_map_page_list *tmp, *last; in get_swap_reader() local
987 tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL); in get_swap_reader()
988 if (!tmp) { in get_swap_reader()
993 handle->maps = tmp; in get_swap_reader()
995 last->next = tmp; in get_swap_reader()
996 last = tmp; in get_swap_reader()
998 tmp->map = (struct swap_map_page *) in get_swap_reader()
[all …]
/kernel/sched/
isolation.c
83 cpumask_var_t tmp; in housekeeping_setup() local
94 alloc_bootmem_cpumask_var(&tmp); in housekeeping_setup()
100 cpumask_andnot(tmp, cpu_present_mask, non_housekeeping_mask); in housekeeping_setup()
101 if (cpumask_empty(tmp)) { in housekeeping_setup()
108 cpumask_andnot(tmp, cpu_present_mask, non_housekeeping_mask); in housekeeping_setup()
109 if (cpumask_empty(tmp)) in housekeeping_setup()
111 cpumask_andnot(tmp, cpu_possible_mask, non_housekeeping_mask); in housekeeping_setup()
112 if (!cpumask_equal(tmp, housekeeping_mask)) { in housekeeping_setup()
114 free_bootmem_cpumask_var(tmp); in housekeeping_setup()
119 free_bootmem_cpumask_var(tmp); in housekeeping_setup()
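
housekeeping_setup() validates the boot parameter with a scratch cpumask: present-and-not-isolated must be non-empty before the mask is accepted. A rough sketch of that check with a plain 64-bit word standing in for cpumask_var_t (bit i = CPU i):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Accept an "isolate these CPUs" request only if at least one present CPU
     * is left over to do housekeeping work. */
    static bool housekeeping_ok(uint64_t present, uint64_t non_housekeeping,
                                uint64_t *housekeeping)
    {
        uint64_t tmp = present & ~non_housekeeping;     /* cpumask_andnot() */

        if (!tmp)               /* cpumask_empty(): nothing left to run on */
            return false;
        *housekeeping = tmp;
        return true;
    }

    int main(void)
    {
        uint64_t hk;

        printf("%d\n", housekeeping_ok(0x0f, 0x0e, &hk));   /* 1: CPU0 remains */
        printf("%d\n", housekeeping_ok(0x0f, 0x0f, &hk));   /* 0: nothing left */
        return 0;
    }
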
topology.c
246 struct perf_domain *tmp; in free_pd() local
249 tmp = pd->next; in free_pd()
251 pd = tmp; in free_pd()
353 struct perf_domain *pd = NULL, *tmp; in build_perf_domains() local
387 tmp = pd_init(i); in build_perf_domains()
388 if (!tmp) in build_perf_domains()
390 tmp->next = pd; in build_perf_domains()
391 pd = tmp; in build_perf_domains()
411 tmp = rd->pd; in build_perf_domains()
413 if (tmp) in build_perf_domains()
[all …]
swait.c
65 LIST_HEAD(tmp); in swake_up_all()
68 list_splice_init(&q->task_list, &tmp); in swake_up_all()
69 while (!list_empty(&tmp)) { in swake_up_all()
70 curr = list_first_entry(&tmp, typeof(*curr), task_list); in swake_up_all()
75 if (list_empty(&tmp)) in swake_up_all()
/kernel/cgroup/
cpuset.c
504 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
513 pmask1 = &tmp->new_cpus; in alloc_cpumasks()
514 pmask2 = &tmp->addmask; in alloc_cpumasks()
515 pmask3 = &tmp->delmask; in alloc_cpumasks()
546 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
554 if (tmp) { in free_cpumasks()
555 free_cpumask_var(tmp->new_cpus); in free_cpumasks()
556 free_cpumask_var(tmp->addmask); in free_cpumasks()
557 free_cpumask_var(tmp->delmask); in free_cpumasks()
1215 struct tmpmasks *tmp) in update_parent_subparts_cpumask() argument
[all …]
/kernel/locking/
rwsem.c
401 struct rwsem_waiter *waiter, *tmp; in rwsem_mark_wake() local
498 list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) { in rwsem_mark_wake()
531 list_for_each_entry_safe(waiter, tmp, &wlist, list) { in rwsem_mark_wake()
1410 long tmp; in __down_read_trylock() local
1417 tmp = RWSEM_UNLOCKED_VALUE; in __down_read_trylock()
1419 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, in __down_read_trylock()
1420 tmp + RWSEM_READER_BIAS)) { in __down_read_trylock()
1425 } while (!(tmp & RWSEM_READ_FAILED_MASK)); in __down_read_trylock()
1434 long tmp = RWSEM_UNLOCKED_VALUE; in __down_write() local
1436 if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, in __down_write()
[all …]
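
The rwsem.c trylock hits show why tmp starts at RWSEM_UNLOCKED_VALUE: try_cmpxchg writes the observed count back into tmp on failure, so the loop retries with the fresh value and bails as soon as a failure bit appears. A C11 sketch of that optimistic reader-count CAS loop (the bias and mask values are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define READER_BIAS       1L
    #define READ_FAILED_MASK  (1L << 62)    /* stand-in for "writer present" bits */

    /* Optimistically bump the reader count; stop once a failure bit is seen. */
    static bool down_read_trylock(atomic_long *count)
    {
        long tmp = 0;   /* expected value: unlocked */

        do {
            /* On failure, tmp is rewritten with the value actually observed. */
            if (atomic_compare_exchange_weak_explicit(count, &tmp,
                                                      tmp + READER_BIAS,
                                                      memory_order_acquire,
                                                      memory_order_relaxed))
                return true;
        } while (!(tmp & READ_FAILED_MASK));

        return false;   /* a writer owns or is waiting for the lock */
    }

    int main(void)
    {
        atomic_long count = 0;
        bool locked = down_read_trylock(&count);

        printf("locked=%d count=%ld\n", locked, atomic_load(&count)); /* 1, 1 */
        return 0;
    }
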
mutex.c
1193 unsigned tmp; in ww_mutex_deadlock_injection() local
1196 tmp = ctx->deadlock_inject_interval; in ww_mutex_deadlock_injection()
1197 if (tmp > UINT_MAX/4) in ww_mutex_deadlock_injection()
1198 tmp = UINT_MAX; in ww_mutex_deadlock_injection()
1200 tmp = tmp*2 + tmp + tmp/2; in ww_mutex_deadlock_injection()
1202 ctx->deadlock_inject_interval = tmp; in ww_mutex_deadlock_injection()
1203 ctx->deadlock_inject_countdown = tmp; in ww_mutex_deadlock_injection()
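
ww_mutex_deadlock_injection() grows the injection interval by 3.5x each round (tmp*2 + tmp + tmp/2) and saturates at UINT_MAX once the value is too large to multiply safely. The same clamped geometric backoff, stated on its own:

    #include <limits.h>
    #include <stdio.h>

    /* Grow an interval by 3.5x, saturating instead of overflowing. */
    static unsigned int grow_interval(unsigned int tmp)
    {
        if (tmp > UINT_MAX / 4)
            return UINT_MAX;                /* multiplying would overflow */
        return tmp * 2 + tmp + tmp / 2;     /* x3.5, rounded down */
    }

    int main(void)
    {
        unsigned int n = 1;

        for (int i = 0; i < 6; i++)
            printf("%u ", n = grow_interval(n));    /* 3 10 35 122 427 1494 */
        printf("\n");
        return 0;
    }
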
/kernel/trace/
trace_probe.c
210 char *tmp; in traceprobe_split_symbol_offset() local
216 tmp = strpbrk(symbol, "+-"); in traceprobe_split_symbol_offset()
217 if (tmp) { in traceprobe_split_symbol_offset()
218 ret = kstrtol(tmp, 0, offset); in traceprobe_split_symbol_offset()
221 *tmp = '\0'; in traceprobe_split_symbol_offset()
362 char *tmp; in parse_probe_arg() local
441 tmp = strchr(arg, '('); in parse_probe_arg()
442 if (!tmp) { in parse_probe_arg()
446 *tmp = '\0'; in parse_probe_arg()
452 offs += (tmp + 1 - arg) + (arg[0] != '-' ? 1 : 0); in parse_probe_arg()
[all …]
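
traceprobe_split_symbol_offset() splits "symbol+0x10" in place: strpbrk() finds the first '+' or '-', the signed offset is parsed from that point (so the separator supplies the sign), and writing '\0' over it truncates the symbol. A user-space sketch with strtol in place of kstrtol:

    #include <string.h>
    #include <stdlib.h>
    #include <stdio.h>

    /* Split "symbol+offset" or "symbol-offset" in place.
     * symbol is truncated to the bare name; *offset gets the signed value. */
    static int split_symbol_offset(char *symbol, long *offset)
    {
        char *tmp, *end;

        *offset = 0;
        tmp = strpbrk(symbol, "+-");
        if (tmp) {
            *offset = strtol(tmp, &end, 0); /* sign comes from the separator */
            if (*end != '\0')
                return -1;                  /* trailing junk after the offset */
            *tmp = '\0';                    /* cut the symbol at the separator */
        }
        return 0;
    }

    int main(void)
    {
        char sym[] = "vfs_read+0x10";
        long off;

        if (!split_symbol_offset(sym, &off))
            printf("%s %+ld\n", sym, off);  /* vfs_read +16 */
        return 0;
    }
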
trace_events_inject.c
244 u8 tmp = (u8) val; in parse_entry() local
246 memcpy(entry + field->offset, &tmp, 1); in parse_entry()
250 u16 tmp = (u16) val; in parse_entry() local
252 memcpy(entry + field->offset, &tmp, 2); in parse_entry()
256 u32 tmp = (u32) val; in parse_entry() local
258 memcpy(entry + field->offset, &tmp, 4); in parse_entry()
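
parse_entry() in trace_events_inject.c narrows the parsed value through a temporary of the field's exact width and memcpy()s just those bytes, so the store respects the field size regardless of alignment. A small sketch of that width-dispatched store (the entry layout is made up for the example):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    typedef uint8_t u8;
    typedef uint16_t u16;
    typedef uint32_t u32;

    /* Store val into entry + offset, truncated to size bytes. */
    static int store_field(void *entry, size_t offset, int size, uint64_t val)
    {
        switch (size) {
        case 1: {
            u8 tmp = (u8)val;
            memcpy((char *)entry + offset, &tmp, 1);
            break;
        }
        case 2: {
            u16 tmp = (u16)val;
            memcpy((char *)entry + offset, &tmp, 2);
            break;
        }
        case 4: {
            u32 tmp = (u32)val;
            memcpy((char *)entry + offset, &tmp, 4);
            break;
        }
        default:
            return -1;  /* unsupported field width */
        }
        return 0;
    }

    int main(void)
    {
        unsigned char entry[8] = { 0 };

        store_field(entry, 2, 2, 0xBEEF);   /* write a u16 at offset 2 */
        printf("%02x %02x\n", entry[2], entry[3]);
        return 0;
    }
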
trace_uprobe.c
476 struct trace_uprobe *tmp; in validate_ref_ctr_offset() local
479 for_each_trace_uprobe(tmp, pos) { in validate_ref_ctr_offset()
480 if (new_inode == d_real_inode(tmp->path.dentry) && in validate_ref_ctr_offset()
481 new->offset == tmp->offset && in validate_ref_ctr_offset()
482 new->ref_ctr_offset != tmp->ref_ctr_offset) { in validate_ref_ctr_offset()
542 char *arg, *filename, *rctr, *rctr_end, *tmp; in trace_uprobe_create() local
626 tmp = strchr(arg, '%'); in trace_uprobe_create()
627 if (tmp) { in trace_uprobe_create()
628 if (!strcmp(tmp, "%return")) { in trace_uprobe_create()
629 *tmp = '\0'; in trace_uprobe_create()
[all …]
/kernel/bpf/
disasm.c
204 char tmp[64]; in print_bpf_insn() local
212 tmp, sizeof(tmp))); in print_bpf_insn()
221 char tmp[64]; in print_bpf_insn() local
227 tmp, sizeof(tmp))); in print_bpf_insn()
229 strcpy(tmp, "unknown"); in print_bpf_insn()
232 tmp, sizeof(tmp)), in print_bpf_insn()
/kernel/time/
clocksource.c
47 u64 tmp; in clocks_calc_mult_shift() local
54 tmp = ((u64)maxsec * from) >> 32; in clocks_calc_mult_shift()
55 while (tmp) { in clocks_calc_mult_shift()
56 tmp >>=1; in clocks_calc_mult_shift()
65 tmp = (u64) to << sft; in clocks_calc_mult_shift()
66 tmp += from / 2; in clocks_calc_mult_shift()
67 do_div(tmp, from); in clocks_calc_mult_shift()
68 if ((tmp >> sftacc) == 0) in clocks_calc_mult_shift()
71 *mult = tmp; in clocks_calc_mult_shift()
501 struct clocksource *cs, *tmp; in __clocksource_watchdog_kthread() local
[all …]
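
clocks_calc_mult_shift() chooses mult and shift so that converted = (cycles * mult) >> shift stays inside 64 bits for up to maxsec seconds of input: it first shrinks the accuracy shift until maxsec seconds of cycles fit, then picks the largest shift whose rounded mult still satisfies that bound. A user-space transcription of the loop quoted above (do_div becomes a plain division):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;
    typedef uint32_t u32;

    /* Find mult/shift so that (cycles * mult) >> shift converts a from-Hz
     * counter into to-Hz units without overflowing for maxsec seconds. */
    static void calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
    {
        u64 tmp;
        u32 sft, sftacc = 32;

        /* Shrink the accuracy shift until maxsec seconds of cycles fit. */
        tmp = ((u64)maxsec * from) >> 32;
        while (tmp) {
            tmp >>= 1;
            sftacc--;
        }

        /* Largest shift whose rounded mult still meets the accuracy bound. */
        for (sft = 32; sft > 0; sft--) {
            tmp = (u64)to << sft;
            tmp += from / 2;    /* round to nearest */
            tmp /= from;
            if ((tmp >> sftacc) == 0)
                break;
        }
        *mult = tmp;
        *shift = sft;
    }

    int main(void)
    {
        u32 mult, shift;

        /* mult/shift pair for a 19.2 MHz counter -> nanoseconds, 600 s range */
        calc_mult_shift(&mult, &shift, 19200000, 1000000000, 600);
        printf("mult=%u shift=%u\n", mult, shift);
        return 0;
    }
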
timekeeping.c
153 struct timespec64 tmp; in tk_set_wall_to_mono() local
159 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, in tk_set_wall_to_mono()
161 WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp)); in tk_set_wall_to_mono()
163 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec); in tk_set_wall_to_mono()
164 tk->offs_real = timespec64_to_ktime(tmp); in tk_set_wall_to_mono()
312 u64 tmp, ntpinterval; in tk_setup_internals() local
326 tmp = NTP_INTERVAL_LENGTH; in tk_setup_internals()
327 tmp <<= clock->shift; in tk_setup_internals()
328 ntpinterval = tmp; in tk_setup_internals()
329 tmp += clock->mult/2; in tk_setup_internals()
[all …]
/kernel/kcsan/
kcsan-test.c
173 char tmp[2][64]; in report_matches() local
177 scnprintf(tmp[0], sizeof(tmp[0]), "%pS", r->access[0].fn); in report_matches()
178 scnprintf(tmp[1], sizeof(tmp[1]), "%pS", r->access[1].fn); in report_matches()
179 cmp = strcmp(tmp[0], tmp[1]); in report_matches()
918 long tmp; in test_atomic_builtins() local
928 tmp = 20L; in test_atomic_builtins()
929 KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L, in test_atomic_builtins()
932 KUNIT_EXPECT_EQ(test, tmp, 20L); in test_atomic_builtins()
934 KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L, in test_atomic_builtins()
937 KUNIT_EXPECT_EQ(test, tmp, 30L); in test_atomic_builtins()
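
The kcsan-test.c lines lean on a property of __atomic_compare_exchange_n that is easy to forget: on failure, the expected argument (tmp) is overwritten with the value actually found, which is exactly what the KUNIT_EXPECT_EQ calls check. A standalone demonstration with the same GCC/Clang builtin:

    #include <stdio.h>

    int main(void)
    {
        long var = 20L;
        long tmp = 20L;

        /* Succeeds: var 20 -> 30, tmp is left untouched at 20. */
        int ok = __atomic_compare_exchange_n(&var, &tmp, 30L, 0,
                                             __ATOMIC_RELAXED, __ATOMIC_RELAXED);
        printf("ok=%d var=%ld tmp=%ld\n", ok, var, tmp);    /* 1 30 20 */

        /* Fails: var is 30, not 20 -- tmp is updated to the observed 30. */
        ok = __atomic_compare_exchange_n(&var, &tmp, 40L, 0,
                                         __ATOMIC_RELAXED, __ATOMIC_RELAXED);
        printf("ok=%d var=%ld tmp=%ld\n", ok, var, tmp);    /* 0 30 30 */
        return 0;
    }
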
/kernel/debug/kdb/
kdb_io.c
203 char tmp; in kdb_read() local
241 tmp = *cp; in kdb_read()
245 *cp = tmp; in kdb_read()
263 tmp = *cp; in kdb_read()
267 *cp = tmp; in kdb_read()
377 tmp = *cp; in kdb_read()
381 *cp = tmp; in kdb_read()
/kernel/debug/
gdbstub.c
241 char *tmp; in kgdb_mem2hex() local
248 tmp = buf + count; in kgdb_mem2hex()
250 err = copy_from_kernel_nofault(tmp, mem, count); in kgdb_mem2hex()
254 buf = hex_byte_pack(buf, *tmp); in kgdb_mem2hex()
255 tmp++; in kgdb_mem2hex()
960 int tmp; in gdb_serial_stub() local
1051 tmp = gdb_cmd_exception_pass(ks); in gdb_serial_stub()
1052 if (tmp > 0) in gdb_serial_stub()
1054 if (tmp == 0) in gdb_serial_stub()
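
kgdb_mem2hex() stages the raw bytes in the back half of the output buffer (tmp = buf + count) and then hex-encodes them forward into the front, so one buffer serves as both scratch space and result. A user-space sketch of that staging trick, with memcpy and a lookup table replacing copy_from_kernel_nofault() and hex_byte_pack():

    #include <string.h>
    #include <stdio.h>

    /* Encode count bytes from mem as hex into buf.
     * buf must hold at least 2*count+1 bytes; its back half is used as staging. */
    static char *mem2hex(const void *mem, char *buf, size_t count)
    {
        static const char hex[] = "0123456789abcdef";
        char *tmp = buf + count;    /* staging area in the upper half */
        size_t i;

        memcpy(tmp, mem, count);    /* kernel uses copy_from_kernel_nofault() */
        for (i = 0; i < count; i++) {
            unsigned char c = tmp[i];

            *buf++ = hex[c >> 4];   /* kernel: hex_byte_pack() */
            *buf++ = hex[c & 0x0f];
        }
        *buf = '\0';
        return buf;
    }

    int main(void)
    {
        char out[2 * 4 + 1];
        unsigned char data[4] = { 0xde, 0xad, 0xbe, 0xef };

        mem2hex(data, out, sizeof(data));
        printf("%s\n", out);    /* deadbeef */
        return 0;
    }
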
