/kernel/ |
D | delayacct.c |
    125  s64 tmp;  in delayacct_add_tsk() local
    128  tmp = (s64)d->cpu_run_real_total;  in delayacct_add_tsk()
    129  tmp += utime + stime;  in delayacct_add_tsk()
    130  d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;  in delayacct_add_tsk()
    133  tmp = (s64)d->cpu_scaled_run_real_total;  in delayacct_add_tsk()
    134  tmp += utimescaled + stimescaled;  in delayacct_add_tsk()
    136  (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;  in delayacct_add_tsk()
    148  tmp = (s64)d->cpu_delay_total + t2;  in delayacct_add_tsk()
    149  d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;  in delayacct_add_tsk()
    151  tmp = (s64)d->cpu_run_virtual_total + t3;  in delayacct_add_tsk()
    [all …]
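The delayacct fragments above show a wrap-guard idiom: accumulate into a signed temporary, and reset the counter to zero if the sum wrapped below its previous value. A minimal userspace sketch of that idiom (the helper name is hypothetical; the kernel builds with -fno-strict-overflow, so the unsigned cast here stands in for its defined signed wraparound):

```c
#include <stdint.h>
#include <stdio.h>

/* Accumulate delta into *total, resetting to 0 on wraparound, the
 * guard applied to each counter in delayacct_add_tsk(). The unsigned
 * cast keeps the overflow well defined in standard C. */
static void accumulate_wrapguard(int64_t *total, int64_t delta)
{
	int64_t tmp = (int64_t)((uint64_t)*total + (uint64_t)delta);

	*total = (tmp < *total) ? 0 : tmp;
}

int main(void)
{
	int64_t total = INT64_MAX - 5;

	accumulate_wrapguard(&total, 3);	/* fits: grows normally */
	printf("%lld\n", (long long)total);
	accumulate_wrapguard(&total, 100);	/* wraps: resets to 0 */
	printf("%lld\n", (long long)total);
	return 0;
}
```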
|
D | resource.c |
    163  struct resource *tmp, **p;  in __request_resource() local
    173  tmp = *p;  in __request_resource()
    174  if (!tmp || tmp->start > end) {  in __request_resource()
    175  new->sibling = tmp;  in __request_resource()
    180  p = &tmp->sibling;  in __request_resource()
    181  if (tmp->end < start)  in __request_resource()
    183  return tmp;  in __request_resource()
    189  struct resource *tmp, **p, *chd;  in __release_resource() local
    193  tmp = *p;  in __release_resource()
    194  if (!tmp)  in __release_resource()
    [all …]
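__request_resource() walks the sibling list through a pointer-to-pointer, linking the new entry in front of the first resource that starts past the requested range and returning the conflicting entry otherwise. A rough standalone sketch of that insert, with simplified types and hypothetical names:

```c
#include <stddef.h>

struct res {
	unsigned long start, end;
	struct res *sibling;
};

/* Insert new into the sorted sibling list rooted at *root, or return
 * the existing entry it overlaps, echoing __request_resource(). */
static struct res *request_res(struct res **root, struct res *new)
{
	unsigned long start = new->start, end = new->end;
	struct res **p = root, *tmp;

	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			/* Found the gap: link new in front of tmp. */
			new->sibling = tmp;
			*p = new;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;	/* entirely below us, keep walking */
		return tmp;		/* overlap: report the conflict */
	}
}

int main(void)
{
	struct res a = { 16, 31, NULL }, b = { 0, 15, NULL };
	struct res *head = NULL;

	request_res(&head, &a);
	request_res(&head, &b);	/* sorts in front of a */
	return head != &b;	/* expect 0 */
}
```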
|
D | crash_core.c |
    45  char *cur = cmdline, *tmp;  in parse_crashkernel_mem() local
    52  start = memparse(cur, &tmp);  in parse_crashkernel_mem()
    53  if (cur == tmp) {  in parse_crashkernel_mem()
    57  cur = tmp;  in parse_crashkernel_mem()
    66  end = memparse(cur, &tmp);  in parse_crashkernel_mem()
    67  if (cur == tmp) {  in parse_crashkernel_mem()
    71  cur = tmp;  in parse_crashkernel_mem()
    84  size = memparse(cur, &tmp);  in parse_crashkernel_mem()
    85  if (cur == tmp) {  in parse_crashkernel_mem()
    89  cur = tmp;  in parse_crashkernel_mem()
    [all …]
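parse_crashkernel_mem() relies on the memparse(cur, &tmp) / cur == tmp idiom: the end pointer reports where parsing stopped, and no forward progress signals a syntax error. The same shape with strtoull (a sketch only; the kernel's memparse() additionally handles K/M/G suffixes):

```c
#include <stdio.h>
#include <stdlib.h>

/* Parse "start-end:size" the way parse_crashkernel_mem() walks its
 * string: each number must consume characters, or we bail out. */
static int parse_range(const char *cur)
{
	char *tmp;
	unsigned long long start, end, size;

	start = strtoull(cur, &tmp, 0);
	if (cur == tmp)
		return -1;	/* no digits consumed */
	cur = tmp;
	if (*cur++ != '-')
		return -1;
	end = strtoull(cur, &tmp, 0);
	if (cur == tmp)
		return -1;
	cur = tmp;
	if (*cur++ != ':')
		return -1;
	size = strtoull(cur, &tmp, 0);
	if (cur == tmp)
		return -1;
	printf("start=%llu end=%llu size=%llu\n", start, end, size);
	return 0;
}

int main(void)
{
	return parse_range("16-64:128");
}
```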
|
D | pid.c |
    165  struct pid_namespace *tmp;  in alloc_pid() local
    184  tmp = ns;  in alloc_pid()
    200  if (tid != 1 && !tmp->child_reaper)  in alloc_pid()
    203  if (!checkpoint_restore_ns_capable(tmp->user_ns))  in alloc_pid()
    212  nr = idr_alloc(&tmp->idr, NULL, tid,  in alloc_pid()
    226  if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)  in alloc_pid()
    233  nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,  in alloc_pid()
    245  pid->numbers[i].ns = tmp;  in alloc_pid()
    246  tmp = tmp->parent;  in alloc_pid()
    342  int tmp;  in __change_pid() local
    [all …]
|
D | sysctl.c |
    200  struct ctl_table tmp = {  in bpf_stats_handler() local
    213  ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);  in bpf_stats_handler()
    234  struct ctl_table tmp = *table;  in bpf_unpriv_handler() local
    239  tmp.data = &unpriv_enable;  in bpf_unpriv_handler()
    240  ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);  in bpf_unpriv_handler()
    458  char *p, tmp[TMPBUFLEN];  in proc_get_long() local
    467  memcpy(tmp, *buf, len);  in proc_get_long()
    469  tmp[len] = 0;  in proc_get_long()
    470  p = tmp;  in proc_get_long()
    482  len = p - tmp;  in proc_get_long()
    [all …]
|
D | sys.c |
    1020  struct tms tmp;  in SYSCALL_DEFINE1() local
    1022  do_sys_times(&tmp);  in SYSCALL_DEFINE1()
    1023  if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))  in SYSCALL_DEFINE1()
    1040  struct compat_tms tmp;  in COMPAT_SYSCALL_DEFINE1() local
    1044  tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);  in COMPAT_SYSCALL_DEFINE1()
    1045  tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);  in COMPAT_SYSCALL_DEFINE1()
    1046  tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);  in COMPAT_SYSCALL_DEFINE1()
    1047  tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);  in COMPAT_SYSCALL_DEFINE1()
    1048  if (copy_to_user(tbuf, &tmp, sizeof(tmp)))  in COMPAT_SYSCALL_DEFINE1()
    1302  struct new_utsname tmp;  in SYSCALL_DEFINE1() local
    [all …]
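The sys_times() fragments illustrate the fill-locally-then-copy-out pattern: the whole struct is assembled on the kernel stack and transferred with one checked copy_to_user() rather than field-by-field stores into user memory. A userspace sketch with memcpy standing in for copy_to_user() (helper names are hypothetical):

```c
#include <stdio.h>
#include <string.h>
#include <sys/times.h>

/* Stand-in for copy_to_user(): one bulk copy whose failure can be
 * checked, instead of many unchecked per-field stores. */
static int copy_out(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);	/* the kernel version can fault and fail */
	return 0;
}

static int sys_times_sketch(struct tms *tbuf)
{
	struct tms tmp;

	times(&tmp);	/* fill the whole struct locally first */
	if (copy_out(tbuf, &tmp, sizeof(struct tms)))
		return -1;	/* -EFAULT in the kernel */
	return 0;
}

int main(void)
{
	struct tms t;

	if (sys_times_sketch(&t) == 0)
		printf("utime=%ld stime=%ld\n",
		       (long)t.tms_utime, (long)t.tms_stime);
	return 0;
}
```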
|
D | fork.c |
    541  struct vm_area_struct *mpnt, *tmp, *prev, **pprev;  in dup_mmap() local
    601  tmp = vm_area_dup(mpnt);  in dup_mmap()
    602  if (!tmp)  in dup_mmap()
    604  retval = vma_dup_policy(mpnt, tmp);  in dup_mmap()
    607  tmp->vm_mm = mm;  in dup_mmap()
    608  retval = dup_userfaultfd(tmp, &uf);  in dup_mmap()
    611  if (tmp->vm_flags & VM_WIPEONFORK) {  in dup_mmap()
    617  tmp->anon_vma = NULL;  in dup_mmap()
    618  } else if (anon_vma_fork(tmp, mpnt))  in dup_mmap()
    620  tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);  in dup_mmap()
    [all …]
|
/kernel/power/ |
D | console.c |
    48  struct pm_vt_switch *entry, *tmp;  in pm_vt_switch_required() local
    51  list_for_each_entry(tmp, &pm_vt_switch_list, head) {  in pm_vt_switch_required()
    52  if (tmp->dev == dev) {  in pm_vt_switch_required()
    54  tmp->required = required;  in pm_vt_switch_required()
    80  struct pm_vt_switch *tmp;  in pm_vt_switch_unregister() local
    83  list_for_each_entry(tmp, &pm_vt_switch_list, head) {  in pm_vt_switch_unregister()
    84  if (tmp->dev == dev) {  in pm_vt_switch_unregister()
    85  list_del(&tmp->head);  in pm_vt_switch_unregister()
    86  kfree(tmp);  in pm_vt_switch_unregister()
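pm_vt_switch_unregister() scans the registered list for the matching device, unlinks the node, frees it, and stops; breaking out immediately after list_del() is what makes the non-_safe iterator sound here. A minimal sketch of the same removal on a hand-rolled singly linked list (types are illustrative):

```c
#include <stdlib.h>

struct vt_switch {
	const void *dev;
	struct vt_switch *next;
};

/* Remove and free the entry for dev, if present. Stopping at the
 * first match is what lets us delete during iteration safely. */
static void vt_switch_unregister(struct vt_switch **head, const void *dev)
{
	struct vt_switch **p, *tmp;

	for (p = head; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->dev == dev) {
			*p = tmp->next;	/* unlink */
			free(tmp);
			break;		/* iteration must stop now */
		}
	}
}

int main(void)
{
	struct vt_switch *head = calloc(1, sizeof(*head));
	static int dev;

	if (!head)
		return 1;
	head->dev = &dev;
	vt_switch_unregister(&head, &dev);
	return head != NULL;	/* expect 0: entry removed */
}
```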
|
D | swap.c |
    959  struct swap_map_page_list *tmp;  in release_swap_reader() local
    964  tmp = handle->maps;  in release_swap_reader()
    966  kfree(tmp);  in release_swap_reader()
    975  struct swap_map_page_list *tmp, *last;  in get_swap_reader() local
    987  tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);  in get_swap_reader()
    988  if (!tmp) {  in get_swap_reader()
    993  handle->maps = tmp;  in get_swap_reader()
    995  last->next = tmp;  in get_swap_reader()
    996  last = tmp;  in get_swap_reader()
    998  tmp->map = (struct swap_map_page *)  in get_swap_reader()
    [all …]
|
/kernel/sched/ |
D | isolation.c |
    83  cpumask_var_t tmp;  in housekeeping_setup() local
    92  alloc_bootmem_cpumask_var(&tmp);  in housekeeping_setup()
    98  cpumask_andnot(tmp, cpu_present_mask, non_housekeeping_mask);  in housekeeping_setup()
    99  if (cpumask_empty(tmp)) {  in housekeeping_setup()
    106  cpumask_andnot(tmp, cpu_present_mask, non_housekeeping_mask);  in housekeeping_setup()
    107  if (cpumask_empty(tmp))  in housekeeping_setup()
    109  cpumask_andnot(tmp, cpu_possible_mask, non_housekeeping_mask);  in housekeeping_setup()
    110  if (!cpumask_equal(tmp, housekeeping_mask)) {  in housekeeping_setup()
    112  free_bootmem_cpumask_var(tmp);  in housekeeping_setup()
    117  free_bootmem_cpumask_var(tmp);  in housekeeping_setup()
|
D | topology.c |
    250  struct perf_domain *tmp;  in free_pd() local
    253  tmp = pd->next;  in free_pd()
    255  pd = tmp;  in free_pd()
    358  struct perf_domain *pd = NULL, *tmp;  in build_perf_domains() local
    400  tmp = pd_init(i);  in build_perf_domains()
    401  if (!tmp)  in build_perf_domains()
    403  tmp->next = pd;  in build_perf_domains()
    404  pd = tmp;  in build_perf_domains()
    424  tmp = rd->pd;  in build_perf_domains()
    426  if (tmp)  in build_perf_domains()
    [all …]
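free_pd() is the classic free-while-walking loop: cache ->next in tmp before freeing the current node, since the node cannot be dereferenced afterwards. Sketched with a hypothetical node type:

```c
#include <stdlib.h>

struct perf_domain_sketch {
	struct perf_domain_sketch *next;
};

/* Free a singly linked list, caching ->next before each free(),
 * as free_pd() does with kfree(). */
static void free_list(struct perf_domain_sketch *pd)
{
	struct perf_domain_sketch *tmp;

	while (pd) {
		tmp = pd->next;	/* grab next before pd is freed */
		free(pd);
		pd = tmp;
	}
}

int main(void)
{
	struct perf_domain_sketch *pd = calloc(1, sizeof(*pd));

	free_list(pd);	/* handles a one-node or empty list alike */
	return 0;
}
```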
|
D | swait.c |
    65  LIST_HEAD(tmp);  in swake_up_all()
    68  list_splice_init(&q->task_list, &tmp);  in swake_up_all()
    69  while (!list_empty(&tmp)) {  in swake_up_all()
    70  curr = list_first_entry(&tmp, typeof(*curr), task_list);  in swake_up_all()
    75  if (list_empty(&tmp))  in swake_up_all()
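swake_up_all() splices the whole wait list onto an on-stack LIST_HEAD(tmp) and then drains that private copy, so waiters can be woken without holding the queue lock for the entire pass. A plain-C sketch of the splice-to-local idiom (locking elided, singly linked list instead of list_head):

```c
#include <stdio.h>

struct node {
	struct node *next;
};

/* Move the entire queue onto a private local list in O(1), then
 * process it, the way swake_up_all() uses list_splice_init().
 * The lock that would guard the splice is elided for brevity. */
static void drain_all(struct node **queue)
{
	struct node *tmp = *queue;	/* splice: steal the whole list */

	*queue = NULL;			/* ...and reinitialize the source */
	while (tmp) {
		struct node *curr = tmp;

		tmp = tmp->next;
		printf("waking %p\n", (void *)curr);
	}
}

int main(void)
{
	struct node a = { NULL }, b = { &a };
	struct node *queue = &b;

	drain_all(&queue);	/* prints both nodes; queue ends NULL */
	return 0;
}
```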
|
/kernel/cgroup/ |
D | cpuset.c |
    520  static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)  in alloc_cpumasks() argument
    529  pmask1 = &tmp->new_cpus;  in alloc_cpumasks()
    530  pmask2 = &tmp->addmask;  in alloc_cpumasks()
    531  pmask3 = &tmp->delmask;  in alloc_cpumasks()
    562  static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)  in free_cpumasks() argument
    570  if (tmp) {  in free_cpumasks()
    571  free_cpumask_var(tmp->new_cpus);  in free_cpumasks()
    572  free_cpumask_var(tmp->addmask);  in free_cpumasks()
    573  free_cpumask_var(tmp->delmask);  in free_cpumasks()
    1233  struct tmpmasks *tmp)  in update_parent_subparts_cpumask() argument
    [all …]
|
/kernel/trace/ |
D | trace_probe.c |
    212  char *tmp;  in traceprobe_split_symbol_offset() local
    218  tmp = strpbrk(symbol, "+-");  in traceprobe_split_symbol_offset()
    219  if (tmp) {  in traceprobe_split_symbol_offset()
    220  ret = kstrtol(tmp, 0, offset);  in traceprobe_split_symbol_offset()
    223  *tmp = '\0';  in traceprobe_split_symbol_offset()
    376  char *tmp;  in parse_probe_arg() local
    460  tmp = strchr(arg, '(');  in parse_probe_arg()
    461  if (!tmp) {  in parse_probe_arg()
    465  *tmp = '\0';  in parse_probe_arg()
    471  offs += (tmp + 1 - arg) + (arg[0] != '-' ? 1 : 0);  in parse_probe_arg()
    [all …]
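traceprobe_split_symbol_offset() locates the first '+' or '-' with strpbrk(), parses the signed offset from that point, and truncates the string there so the symbol name stands alone. The same split in userspace C, with strtol in place of kstrtol():

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "symbol+0x10" / "symbol-8" into name and signed offset,
 * mirroring traceprobe_split_symbol_offset(). */
static int split_symbol_offset(char *symbol, long *offset)
{
	char *tmp = strpbrk(symbol, "+-");

	if (tmp) {
		char *end;

		*offset = strtol(tmp, &end, 0);
		if (*end != '\0')
			return -1;	/* trailing junk after the offset */
		*tmp = '\0';		/* cut the name at the sign */
	} else {
		*offset = 0;
	}
	return 0;
}

int main(void)
{
	char sym[] = "vfs_read+0x10";
	long off;

	if (split_symbol_offset(sym, &off) == 0)
		printf("%s @ %+ld\n", sym, off);	/* vfs_read @ +16 */
	return 0;
}
```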
|
D | trace_events_inject.c |
    242  u8 tmp = (u8) val;  in parse_entry() local
    244  memcpy(entry + field->offset, &tmp, 1);  in parse_entry()
    248  u16 tmp = (u16) val;  in parse_entry() local
    250  memcpy(entry + field->offset, &tmp, 2);  in parse_entry()
    254  u32 tmp = (u32) val;  in parse_entry() local
    256  memcpy(entry + field->offset, &tmp, 4);  in parse_entry()
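parse_entry() narrows the parsed value through a temporary of exactly the field's width before memcpy'ing it into the record, so the stored bytes match the field size on any endianness. A compact sketch of that dispatch (hypothetical helper name):

```c
#include <stdint.h>
#include <string.h>

/* Store val into buf at offset using exactly 'size' bytes, going
 * through a temporary of the right width as parse_entry() does. */
static int store_field(uint8_t *buf, size_t offset, int size, uint64_t val)
{
	switch (size) {
	case 1: {
		uint8_t tmp = (uint8_t)val;
		memcpy(buf + offset, &tmp, 1);
		break;
	}
	case 2: {
		uint16_t tmp = (uint16_t)val;
		memcpy(buf + offset, &tmp, 2);
		break;
	}
	case 4: {
		uint32_t tmp = (uint32_t)val;
		memcpy(buf + offset, &tmp, 4);
		break;
	}
	case 8:
		memcpy(buf + offset, &val, 8);
		break;
	default:
		return -1;	/* unsupported field width */
	}
	return 0;
}

int main(void)
{
	uint8_t rec[16] = { 0 };

	return store_field(rec, 4, 2, 0xBEEF);	/* writes 2 bytes at offset 4 */
}
```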
|
D | trace_boot.c |
    424  char *tmp;  in trace_boot_init_histograms() local
    432  tmp = kstrdup(buf, GFP_KERNEL);  in trace_boot_init_histograms()
    433  if (!tmp)  in trace_boot_init_histograms()
    436  pr_err("Failed to apply hist trigger: %s\n", tmp);  in trace_boot_init_histograms()
    437  kfree(tmp);  in trace_boot_init_histograms()
    443  tmp = kstrdup(buf, GFP_KERNEL);  in trace_boot_init_histograms()
    444  if (!tmp)  in trace_boot_init_histograms()
    447  pr_err("Failed to apply hist trigger: %s\n", tmp);  in trace_boot_init_histograms()
    448  kfree(tmp);  in trace_boot_init_histograms()
|
D | trace_osnoise.c |
    1880  struct dentry *tmp;  in init_tracefs() local
    1891  tmp = tracefs_create_file("period_us", TRACE_MODE_WRITE, top_dir,  in init_tracefs()
    1893  if (!tmp)  in init_tracefs()
    1896  tmp = tracefs_create_file("runtime_us", TRACE_MODE_WRITE, top_dir,  in init_tracefs()
    1898  if (!tmp)  in init_tracefs()
    1901  tmp = tracefs_create_file("stop_tracing_us", TRACE_MODE_WRITE, top_dir,  in init_tracefs()
    1903  if (!tmp)  in init_tracefs()
    1906  tmp = tracefs_create_file("stop_tracing_total_us", TRACE_MODE_WRITE, top_dir,  in init_tracefs()
    1908  if (!tmp)  in init_tracefs()
    1911  tmp = trace_create_file("cpus", TRACE_MODE_WRITE, top_dir, NULL, &cpus_fops);  in init_tracefs()
    [all …]
|
/kernel/locking/ |
D | rwsem.c |
    256  long tmp = RWSEM_UNLOCKED_VALUE;  in rwsem_write_trylock() local
    258  if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {  in rwsem_write_trylock()
    400  struct rwsem_waiter *waiter, *tmp;  in rwsem_mark_wake() local
    495  list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {  in rwsem_mark_wake()
    534  list_for_each_entry_safe(waiter, tmp, &wlist, list) {  in rwsem_mark_wake()
    1290  long tmp;  in __down_read_trylock() local
    1295  tmp = atomic_long_read(&sem->count);  in __down_read_trylock()
    1296  while (!(tmp & RWSEM_READ_FAILED_MASK)) {  in __down_read_trylock()
    1297  if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,  in __down_read_trylock()
    1298  tmp + RWSEM_READER_BIAS)) {  in __down_read_trylock()
    [all …]
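__down_read_trylock() is a canonical compare-exchange retry loop: read the count, bail if any failure bit is set, otherwise try to publish count + RWSEM_READER_BIAS; on failure, try_cmpxchg refreshes tmp with the observed value so the loop re-tests without an extra load. The same loop with C11 atomics and illustrative constants (not the kernel's real bit layout):

```c
#include <stdatomic.h>
#include <stdbool.h>

#define READ_FAILED_MASK 0x7L	/* illustrative, not the kernel's layout */
#define READER_BIAS      0x100L

/* Optimistic reader trylock in the style of __down_read_trylock():
 * atomic_compare_exchange_weak updates tmp on failure, exactly like
 * the kernel's atomic_long_try_cmpxchg_acquire(). */
static bool down_read_trylock(atomic_long *count)
{
	long tmp = atomic_load_explicit(count, memory_order_relaxed);

	while (!(tmp & READ_FAILED_MASK)) {
		if (atomic_compare_exchange_weak_explicit(count, &tmp,
					tmp + READER_BIAS,
					memory_order_acquire,
					memory_order_relaxed))
			return true;	/* we hold a reader reference */
		/* tmp now holds the fresh value; loop and re-test */
	}
	return false;
}

int main(void)
{
	atomic_long count = 0;

	return down_read_trylock(&count) ? 0 : 1;
}
```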
|
D | mutex.c |
    811  unsigned tmp;  in ww_mutex_deadlock_injection() local
    814  tmp = ctx->deadlock_inject_interval;  in ww_mutex_deadlock_injection()
    815  if (tmp > UINT_MAX/4)  in ww_mutex_deadlock_injection()
    816  tmp = UINT_MAX;  in ww_mutex_deadlock_injection()
    818  tmp = tmp*2 + tmp + tmp/2;  in ww_mutex_deadlock_injection()
    820  ctx->deadlock_inject_interval = tmp;  in ww_mutex_deadlock_injection()
    821  ctx->deadlock_inject_countdown = tmp;  in ww_mutex_deadlock_injection()
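ww_mutex_deadlock_injection() grows the injection interval by tmp*2 + tmp + tmp/2, i.e. roughly 3.5x per trip (the division truncates), clamping to UINT_MAX once the old value exceeds UINT_MAX/4 so the arithmetic cannot overflow; from 1 the sequence runs 1, 3, 10, 35, 122, 427, ... A standalone check of that arithmetic:

```c
#include <limits.h>
#include <stdio.h>

/* Grow an interval ~3.5x per step with an overflow clamp, as
 * ww_mutex_deadlock_injection() does. */
static unsigned next_interval(unsigned tmp)
{
	if (tmp > UINT_MAX / 4)
		return UINT_MAX;	/* 3.5x would overflow: saturate */
	return tmp * 2 + tmp + tmp / 2;
}

int main(void)
{
	unsigned v = 1;
	int i;

	for (i = 0; i < 6; i++) {
		printf("%u ", v);	/* 1 3 10 35 122 427 */
		v = next_interval(v);
	}
	printf("\n");
	return 0;
}
```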
|
/kernel/bpf/ |
D | disasm.c |
    248  char tmp[64];  in print_bpf_insn() local
    256  tmp, sizeof(tmp)));  in print_bpf_insn()
    265  char tmp[64];  in print_bpf_insn() local
    271  tmp, sizeof(tmp)));  in print_bpf_insn()
    273  strcpy(tmp, "unknown");  in print_bpf_insn()
    276  tmp, sizeof(tmp)),  in print_bpf_insn()
|
/kernel/time/ |
D | clocksource.c |
    49  u64 tmp;  in clocks_calc_mult_shift() local
    56  tmp = ((u64)maxsec * from) >> 32;  in clocks_calc_mult_shift()
    57  while (tmp) {  in clocks_calc_mult_shift()
    58  tmp >>=1;  in clocks_calc_mult_shift()
    67  tmp = (u64) to << sft;  in clocks_calc_mult_shift()
    68  tmp += from / 2;  in clocks_calc_mult_shift()
    69  do_div(tmp, from);  in clocks_calc_mult_shift()
    70  if ((tmp >> sftacc) == 0)  in clocks_calc_mult_shift()
    73  *mult = tmp;  in clocks_calc_mult_shift()
    655  struct clocksource *cs, *tmp;  in __clocksource_watchdog_kthread() local
    [all …]
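clocks_calc_mult_shift() converts between time domains as cycles * mult >> shift; the mult candidate for a given shift is (to << shift + from/2) / from, rounded to nearest. For a 19.2 MHz counter converted to nanoseconds with shift = 8, that gives mult = 13333, i.e. about 52.08 ns per cycle. A sketch of that computation (one fixed shift, without the kernel's overflow-sizing loop):

```c
#include <stdint.h>
#include <stdio.h>

/* Compute mult for cycles -> ns conversion at a fixed shift, the
 * rounded division clocks_calc_mult_shift() performs per candidate. */
static uint32_t calc_mult(uint32_t from_hz, uint32_t to_hz, uint32_t shift)
{
	uint64_t tmp = (uint64_t)to_hz << shift;

	tmp += from_hz / 2;			/* round to nearest */
	return (uint32_t)(tmp / from_hz);	/* do_div() in the kernel */
}

int main(void)
{
	uint32_t shift = 8;
	uint32_t mult = calc_mult(19200000, 1000000000, shift);
	uint64_t cycles = 19200000;	/* one second of a 19.2 MHz counter */

	/* mult=13333; one second of cycles converts to ~1e9 ns */
	printf("mult=%u ns=%llu\n", mult,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}
```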
|
D | timekeeping.c |
    154  struct timespec64 tmp;  in tk_set_wall_to_mono() local
    160  set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,  in tk_set_wall_to_mono()
    162  WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));  in tk_set_wall_to_mono()
    164  set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);  in tk_set_wall_to_mono()
    165  tk->offs_real = timespec64_to_ktime(tmp);  in tk_set_wall_to_mono()
    313  u64 tmp, ntpinterval;  in tk_setup_internals() local
    327  tmp = NTP_INTERVAL_LENGTH;  in tk_setup_internals()
    328  tmp <<= clock->shift;  in tk_setup_internals()
    329  ntpinterval = tmp;  in tk_setup_internals()
    330  tmp += clock->mult/2;  in tk_setup_internals()
    [all …]
|
/kernel/kcsan/ |
D | kcsan_test.c |
    175  char tmp[2][64];  in report_matches() local
    179  scnprintf(tmp[0], sizeof(tmp[0]), "%pS", r->access[0].fn);  in report_matches()
    180  scnprintf(tmp[1], sizeof(tmp[1]), "%pS", r->access[1].fn);  in report_matches()
    181  cmp = strcmp(tmp[0], tmp[1]);  in report_matches()
    928  long tmp;  in test_atomic_builtins() local
    938  tmp = 20L;  in test_atomic_builtins()
    939  KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,  in test_atomic_builtins()
    942  KUNIT_EXPECT_EQ(test, tmp, 20L);  in test_atomic_builtins()
    944  KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,  in test_atomic_builtins()
    947  KUNIT_EXPECT_EQ(test, tmp, 30L);  in test_atomic_builtins()
|
/kernel/debug/kdb/ |
D | kdb_io.c |
    203  char tmp;  in kdb_read() local
    241  tmp = *cp;  in kdb_read()
    245  *cp = tmp;  in kdb_read()
    263  tmp = *cp;  in kdb_read()
    267  *cp = tmp;  in kdb_read()
    377  tmp = *cp;  in kdb_read()
    381  *cp = tmp;  in kdb_read()
|
/kernel/debug/ |
D | gdbstub.c |
    238  char *tmp;  in kgdb_mem2hex() local
    245  tmp = buf + count;  in kgdb_mem2hex()
    247  err = copy_from_kernel_nofault(tmp, mem, count);  in kgdb_mem2hex()
    251  buf = hex_byte_pack(buf, *tmp);  in kgdb_mem2hex()
    252  tmp++;  in kgdb_mem2hex()
    957  int tmp;  in gdb_serial_stub() local
    1048  tmp = gdb_cmd_exception_pass(ks);  in gdb_serial_stub()
    1049  if (tmp > 0)  in gdb_serial_stub()
    1051  if (tmp == 0)  in gdb_serial_stub()
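kgdb_mem2hex() stages the raw bytes in the upper half of the output buffer, then rewrites the buffer front-to-back as two hex digits per byte; this works in place because each raw byte is read just before its cell can be overwritten. A userspace sketch of the same in-place expansion:

```c
#include <stdio.h>
#include <string.h>

static const char hex_asc[] = "0123456789abcdef";

/* hex_byte_pack() equivalent: emit two lowercase hex digits. */
static char *hex_byte_pack(char *buf, unsigned char byte)
{
	*buf++ = hex_asc[byte >> 4];
	*buf++ = hex_asc[byte & 0x0f];
	return buf;
}

/* Encode count bytes of mem as hex into buf (size >= 2 * count),
 * staging the raw copy in the upper half as kgdb_mem2hex() does:
 * the reader at buf + count stays ahead of the hex writer. */
static void mem2hex(char *buf, const void *mem, size_t count)
{
	char *tmp = buf + count;
	size_t i;

	memcpy(tmp, mem, count);	/* copy_from_kernel_nofault() in kgdb */
	for (i = 0; i < count; i++) {
		buf = hex_byte_pack(buf, (unsigned char)*tmp);
		tmp++;
	}
}

int main(void)
{
	char out[9] = { 0 };

	mem2hex(out, "\x12\xab\xcd\xef", 4);
	printf("%s\n", out);	/* 12abcdef */
	return 0;
}
```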
|