/kernel/ |
D | pid.c |
      57  .nr = 0,
     152  idr_remove(&ns->idr, upid->nr);   in free_pid()
     164  int i, nr;   in alloc_pid() local
     212  nr = idr_alloc(&tmp->idr, NULL, tid,   in alloc_pid()
     218  if (nr == -ENOSPC)   in alloc_pid()
     219  nr = -EEXIST;   in alloc_pid()
     233  nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,   in alloc_pid()
     239  if (nr < 0) {   in alloc_pid()
     240  retval = (nr == -ENOSPC) ? -EAGAIN : nr;   in alloc_pid()
     244  pid->numbers[i].nr = nr;   in alloc_pid()
     [all …]
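
Note: the pid.c hits above orbit the per-namespace IDR that hands out PID numbers: idr_alloc()/idr_alloc_cyclic() return the new nr (or a negative errno, with -ENOSPC remapped as shown), and free_pid() releases it with idr_remove(). A minimal sketch of that allocate/remap/remove pattern, using a stand-alone example_idr rather than the real ns->idr, with the start value 2 chosen purely for illustration:

    #include <linux/idr.h>
    #include <linux/gfp.h>

    static DEFINE_IDR(example_idr);          /* hypothetical; alloc_pid() uses ns->idr */

    static int example_alloc_nr(void *obj)
    {
            int nr;

            /* cyclic allocation of an id >= 2; end = 0 means "no upper bound" */
            nr = idr_alloc_cyclic(&example_idr, obj, 2, 0, GFP_KERNEL);
            if (nr == -ENOSPC)
                    return -EAGAIN;          /* same errno remapping as the hits above */
            return nr;                       /* negative errno, or the allocated id */
    }

    static void example_free_nr(int nr)
    {
            idr_remove(&example_idr, nr);
    }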
|
D | softirq.c |
     685  inline void raise_softirq_irqoff(unsigned int nr)   in raise_softirq_irqoff() argument
     687  __raise_softirq_irqoff(nr);   in raise_softirq_irqoff()
     702  void raise_softirq(unsigned int nr)   in raise_softirq() argument
     707  raise_softirq_irqoff(nr);   in raise_softirq()
     711  void __raise_softirq_irqoff(unsigned int nr)   in __raise_softirq_irqoff() argument
     714  trace_softirq_raise(nr);   in __raise_softirq_irqoff()
     715  or_softirq_pending(1UL << nr);   in __raise_softirq_irqoff()
     718  void open_softirq(int nr, void (*action)(struct softirq_action *))   in open_softirq() argument
     720  softirq_vec[nr].action = action;   in open_softirq()
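
Note: open_softirq() installs the handler in softirq_vec[nr], and raise_softirq()/__raise_softirq_irqoff() set the pending bit (1UL << nr) so the handler runs on interrupt exit or in ksoftirqd. A hedged sketch of the pairing, with MY_SOFTIRQ as a placeholder (real users add their own entry to the fixed softirq enum):

    #include <linux/interrupt.h>

    #define MY_SOFTIRQ HI_SOFTIRQ   /* placeholder so the sketch compiles; not a real new slot */

    /* Runs in softirq context: interrupts enabled, but sleeping is not allowed. */
    static void my_softirq_action(struct softirq_action *h)
    {
            /* drain a queue, kick a state machine, ... */
    }

    static void my_setup(void)
    {
            open_softirq(MY_SOFTIRQ, my_softirq_action);
    }

    static void my_trigger(void)
    {
            raise_softirq(MY_SOFTIRQ);      /* or raise_softirq_irqoff() if IRQs are already off */
    }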
|
D | seccomp.c |
     249  sd->nr = syscall_get_nr(task, regs);   in populate_seccomp_data()
     365  int syscall_nr = sd->nr;   in seccomp_cache_check_allow()
     736  case offsetof(struct seccomp_data, nr):   in seccomp_is_const_allow()
     737  reg_value = sd->nr;   in seccomp_is_const_allow()
     797  int nr;   in seccomp_cache_prepare_bitmap() local
     807  for (nr = 0; nr < bitmap_size; nr++) {   in seccomp_cache_prepare_bitmap()
     809  if (!test_bit(nr, bitmap))   in seccomp_cache_prepare_bitmap()
     812  sd.nr = nr;   in seccomp_cache_prepare_bitmap()
     823  __clear_bit(nr, bitmap);   in seccomp_cache_prepare_bitmap()
    1308  this_syscall = sd ? sd->nr :   in __secure_computing()
     [all …]
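
Note: seccomp_data.nr is the syscall number that both the classic BPF filter and the constant-action bitmap cache key on. A minimal userspace sketch of a filter that loads nr and kills the process for one syscall (getuid chosen only as an example; a real filter would also check seccomp_data.arch):

    #include <stddef.h>
    #include <unistd.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    static int install_filter(void)
    {
            struct sock_filter filter[] = {
                    /* accumulator = seccomp_data.nr */
                    BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
                    /* if (nr == __NR_getuid) kill, else allow */
                    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getuid, 0, 1),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
            };
            struct sock_fprog prog = {
                    .len = sizeof(filter) / sizeof(filter[0]),
                    .filter = filter,
            };

            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    return -1;
            return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
    }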
|
D | notifier.c |
     116  int ret, nr = 0;   in notifier_call_chain_robust() local
     118  ret = notifier_call_chain(nl, val_up, v, -1, &nr);   in notifier_call_chain_robust()
     120  notifier_call_chain(nl, val_down, v, nr-1, NULL);   in notifier_call_chain_robust()
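
Note: notifier_call_chain_robust() counts in nr how many callbacks accepted the val_up event; if one refuses, only those first nr-1 are replayed with val_down so they can undo their preparation. A hedged sketch of a caller and listener using the blocking wrapper, with MY_PREPARE/MY_ABORT as made-up event values:

    #include <linux/notifier.h>

    #define MY_PREPARE 1    /* hypothetical "val_up" event */
    #define MY_ABORT   2    /* hypothetical "val_down" rollback event */

    static BLOCKING_NOTIFIER_HEAD(my_chain);

    static int my_listener(struct notifier_block *nb, unsigned long event, void *data)
    {
            if (event == MY_PREPARE)
                    return NOTIFY_OK;       /* return NOTIFY_BAD here to veto the transition */
            if (event == MY_ABORT) {
                    /* undo whatever MY_PREPARE set up */
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_nb = { .notifier_call = my_listener };

    static int my_transition(void *data)
    {
            /* registered once earlier with blocking_notifier_chain_register(&my_chain, &my_nb) */
            return blocking_notifier_call_chain_robust(&my_chain, MY_PREPARE, MY_ABORT, data);
    }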
|
D | pid_namespace.c |
     168  int nr;   in zap_pid_ns_processes() local
     201  nr = 2;   in zap_pid_ns_processes()
     202  idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {   in zap_pid_ns_processes()
|
D | fork.c |
    1898  pid_t nr = -1;   in pidfd_show_fdinfo() local
    1902  nr = pid_nr_ns(pid, ns);   in pidfd_show_fdinfo()
    1905  seq_put_decimal_ll(m, "Pid:\t", nr);   in pidfd_show_fdinfo()
    1908  seq_put_decimal_ll(m, "\nNSpid:\t", nr);   in pidfd_show_fdinfo()
    1909  if (nr > 0) {   in pidfd_show_fdinfo()
    1918  seq_put_decimal_ll(m, "\t", pid->numbers[i].nr);   in pidfd_show_fdinfo()
    2626  pid_t nr;   in kernel_clone() local
    2675  nr = pid_vnr(pid);   in kernel_clone()
    2678  put_user(nr, args->parent_tid);   in kernel_clone()
    2705  return nr;   in kernel_clone()
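
Note: pidfd_show_fdinfo() is what emits the Pid:/NSpid: lines (built from the nr values above) when a pidfd's fdinfo is read, and kernel_clone() hands pid_vnr(pid) back as the caller-visible child PID. A small userspace sketch that dumps that fdinfo for the current process, assuming a kernel and libc that define SYS_pidfd_open:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            int pidfd = syscall(SYS_pidfd_open, getpid(), 0);
            char path[64], line[256];
            FILE *f;

            if (pidfd < 0)
                    return 1;
            snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", pidfd);
            f = fopen(path, "r");
            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);    /* includes the "Pid:" and "NSpid:" lines */
            fclose(f);
            return 0;
    }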
|
D | ptrace.c |
     751  if (arg.nr < 0)   in ptrace_peek_siginfo()
     763  for (i = 0; i < arg.nr; ) {   in ptrace_peek_siginfo()
     962  info->entry.nr = syscall_get_nr(child, regs);   in ptrace_get_syscall_info_entry()
|
/kernel/bpf/ |
D | stackmap.c |
      22  u32 nr;   member
     232  entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,   in get_callchain_entry_for_task()
     245  for (i = entry->nr - 1; i >= 0; i--)   in get_callchain_entry_for_task()
     268  if (trace->nr <= skip)   in __bpf_get_stackid()
     272  trace_nr = trace->nr - skip;   in __bpf_get_stackid()
     290  new_bucket->nr = trace_nr;   in __bpf_get_stackid()
     295  if (hash_matches && bucket->nr == trace_nr &&   in __bpf_get_stackid()
     305  if (hash_matches && bucket->nr == trace_nr &&   in __bpf_get_stackid()
     319  new_bucket->nr = trace_nr;   in __bpf_get_stackid()
     367  while (nr_kernel < trace->nr) {   in count_kernel_ip()
     [all …]
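
Note: trace->nr is the number of captured frames; __bpf_get_stackid() subtracts the skip count and hashes the remaining trace_nr instruction pointers into a BPF_MAP_TYPE_STACK_TRACE bucket whose nr records the stored depth. A hedged libbpf-style sketch of the program side that drives it (the kprobe attach point is chosen only for illustration):

    #include <linux/bpf.h>
    #include <linux/ptrace.h>
    #include <bpf/bpf_helpers.h>

    #define MAX_DEPTH 127

    struct {
            __uint(type, BPF_MAP_TYPE_STACK_TRACE);
            __uint(max_entries, 1024);
            __uint(key_size, sizeof(__u32));
            __uint(value_size, MAX_DEPTH * sizeof(__u64));
    } stacks SEC(".maps");

    SEC("kprobe/do_sys_openat2")
    int capture_stack(struct pt_regs *ctx)
    {
            /* bucket id (stack id) on success, negative error otherwise */
            long id = bpf_get_stackid(ctx, &stacks, BPF_F_USER_STACK);

            bpf_printk("stackid=%ld", id);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";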
|
/kernel/locking/ |
D | lockdep_proc.c |
     412  unsigned long nl, nr;   in lock_stat_cmp() local
     414  nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;   in lock_stat_cmp()
     415  nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;   in lock_stat_cmp()
     417  return nr - nl;   in lock_stat_cmp()
     431  static void snprint_time(char *buf, size_t bufsiz, s64 nr)   in snprint_time() argument
     436  nr += 5; /* for display rounding */   in snprint_time()
     437  div = div_s64_rem(nr, 1000, &rem);   in snprint_time()
     451  seq_printf(m, "%14lu", lt->nr);   in seq_lock_time()
     455  seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);   in seq_lock_time()
     505  if (stats->write_holdtime.nr) {   in seq_stats()
     [all …]
|
D | lockdep.c |
     241  if (time < lt->min || !lt->nr)   in lock_time_inc()
     245  lt->nr++;   in lock_time_inc()
     250  if (!src->nr)   in lock_time_add()
     256  if (src->min < dst->min || !dst->nr)   in lock_time_add()
     260  dst->nr += src->nr;   in lock_time_add()
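
Note: across lockdep.c and lockdep_proc.c the lock_time fields act as a tiny stats accumulator: nr counts samples, total/min/max track the distribution, and the proc side prints the mean as div64_u64(total, nr) guarded by nr. A condensed sketch of that accounting, assuming only these four fields matter:

    #include <linux/types.h>
    #include <linux/math64.h>

    struct time_stats {                     /* shaped like struct lock_time */
            s64 min, max, total;
            unsigned long nr;
    };

    static void time_stats_inc(struct time_stats *st, s64 time)
    {
            if (time > st->max)
                    st->max = time;
            if (time < st->min || !st->nr)  /* first sample initialises min */
                    st->min = time;
            st->total += time;
            st->nr++;
    }

    static s64 time_stats_avg(const struct time_stats *st)
    {
            return st->nr ? div64_u64(st->total, st->nr) : 0;   /* avoid divide-by-zero */
    }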
|
/kernel/trace/ |
D | trace_syscalls.c |
     104  static struct syscall_metadata *syscall_nr_to_meta(int nr)   in syscall_nr_to_meta() argument
     107  return xa_load(&syscalls_metadata_sparse, (unsigned long)nr);   in syscall_nr_to_meta()
     109  if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)   in syscall_nr_to_meta()
     112  return syscalls_metadata[nr];   in syscall_nr_to_meta()
     138  syscall = trace->nr;   in print_syscall_enter()
     184  syscall = trace->nr;   in print_syscall_exit()
     327  entry->nr = syscall_nr;   in ftrace_syscall_enter()
     364  entry->nr = syscall_nr;   in ftrace_syscall_exit()
     506  unsigned long __init __weak arch_syscall_addr(int nr)   in arch_syscall_addr() argument
     508  return (unsigned long)sys_call_table[nr];   in arch_syscall_addr()
     [all …]
|
D | trace.h |
     128  int nr;   member
     134  int nr;   member
|
D | bpf_trace.c |
    1351  return br_stack->nr * br_entry_size;   in BPF_CALL_4()
    1356  to_copy = min_t(u32, br_stack->nr * br_entry_size, size);   in BPF_CALL_4()
|
/kernel/events/ |
D | hw_breakpoint.c |
     141  int nr;   in fetch_bp_busy_slots() local
     143  nr = info->cpu_pinned;   in fetch_bp_busy_slots()
     145  nr += max_task_bp_pinned(cpu, type);   in fetch_bp_busy_slots()
     147  nr += task_bp_pinned(cpu, bp, type);   in fetch_bp_busy_slots()
     149  if (nr > slots->pinned)   in fetch_bp_busy_slots()
     150  slots->pinned = nr;   in fetch_bp_busy_slots()
     152  nr = info->flexible;   in fetch_bp_busy_slots()
     153  if (nr > slots->flexible)   in fetch_bp_busy_slots()
     154  slots->flexible = nr;   in fetch_bp_busy_slots()
|
D | callchain.c | 193 ctx.nr = entry->nr = init_nr; in get_perf_callchain()
|
D | core.c |
    1438  u32 nr;   in perf_event_pid_type() local
    1445  nr = __task_pid_nr_ns(p, type, event->ns);   in perf_event_pid_type()
    1447  if (!nr && !pid_alive(p))   in perf_event_pid_type()
    1448  nr = -1;   in perf_event_pid_type()
    1449  return nr;   in perf_event_pid_type()
    1906  int nr = 1;   in __perf_event_read_size() local
    1918  nr += nr_siblings;   in __perf_event_read_size()
    1926  return size + nr * entry;   in __perf_event_read_size()
    3765  itrs[heap->nr] = event;   in __heap_add()
    3766  heap->nr++;   in __heap_add()
    [all …]
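
Note: in __perf_event_read_size() nr starts at 1 and grows by nr_siblings when the event uses PERF_FORMAT_GROUP, and the same nr reappears as the leading u64 of the record userspace gets back from read() on the group leader. A sketch of parsing such a group read, assuming the leader was opened with PERF_FORMAT_GROUP | PERF_FORMAT_ID and no time fields:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    struct group_read {
            uint64_t nr;                    /* number of counters that follow */
            struct {
                    uint64_t value;
                    uint64_t id;            /* present because of PERF_FORMAT_ID */
            } cnt[];
    };

    static void dump_group(int group_fd)
    {
            char buf[4096];
            ssize_t n = read(group_fd, buf, sizeof(buf));
            struct group_read *r = (struct group_read *)buf;

            if (n <= 0)
                    return;
            for (uint64_t i = 0; i < r->nr; i++)
                    printf("id=%llu value=%llu\n",
                           (unsigned long long)r->cnt[i].id,
                           (unsigned long long)r->cnt[i].value);
    }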
|
D | ring_buffer.c |
     889  int i, nr;   in rb_free_work() local
     892  nr = data_page_nr(rb);   in rb_free_work()
     896  for (i = 0; i <= nr; i++)   in rb_free_work()
|
/kernel/rcu/ |
D | rcuscale.c |
     510  int nr;   in compute_real() local
     513  nr = n;   in compute_real()
     515  nr = num_online_cpus() + 1 + n;   in compute_real()
     516  if (nr <= 0)   in compute_real()
     517  nr = 1;   in compute_real()
     519  return nr;   in compute_real()
|
/kernel/entry/ |
D | common.c |
     266  unsigned long nr = syscall_get_nr(current, regs);   in syscall_exit_to_user_mode_prepare() local
     271  if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))   in syscall_exit_to_user_mode_prepare()
|
/kernel/irq/ |
D | irqdesc.c |
     515  static int irq_expand_nr_irqs(unsigned int nr)   in irq_expand_nr_irqs() argument
     517  if (nr > IRQ_BITMAP_BITS)   in irq_expand_nr_irqs()
     519  nr_irqs = nr;   in irq_expand_nr_irqs()
     617  static int irq_expand_nr_irqs(unsigned int nr)   in irq_expand_nr_irqs() argument
|
D | internals.h | 108 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
|
/kernel/sched/ |
D | wait.c |
     165  void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)   in __wake_up_locked() argument
     167  __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);   in __wake_up_locked()
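
Note: here nr is the number of exclusive waiters to wake, and __wake_up_locked() is the variant for callers that already hold wq_head->lock. A hedged sketch of the usual pairing, with a hypothetical wait queue and condition:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wq);  /* hypothetical wait queue */
    static int my_ready;

    static void my_waiter(void)
    {
            wait_event(my_wq, my_ready);    /* sleeps until a waker sets my_ready */
    }

    static void my_waker(void)
    {
            spin_lock(&my_wq.lock);
            my_ready = 1;
            /* already holding the lock, so use the _locked variant; nr = 1 exclusive waiter */
            __wake_up_locked(&my_wq, TASK_NORMAL, 1);
            spin_unlock(&my_wq.lock);
    }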
|
D | fair.c |
    3731  if (cfs_rq->removed.nr) {   in update_cfs_rq_load_avg()
    3739  cfs_rq->removed.nr = 0;   in update_cfs_rq_load_avg()
    3973  ++cfs_rq->removed.nr;   in remove_entity_load_avg()
    6525  int i, cpu, idle_cpu = -1, nr = INT_MAX;   in select_idle_cpu() local
    6559  nr = div_u64(span_avg, avg_cost);   in select_idle_cpu()
    6561  nr = 4;   in select_idle_cpu()
    6570  nr = READ_ONCE(sd_share->nr_idle_scan) + 1;   in select_idle_cpu()
    6572  if (nr == 1)   in select_idle_cpu()
    6584  if (!--nr)   in select_idle_cpu()
|
D | sched.h | 581 int nr; member
|
/kernel/time/ |
D | posix-timers.c |
     113  static int hash(struct signal_struct *sig, unsigned int nr)   in hash() argument
     115  return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));   in hash()
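
Note: the hash above mixes the owning signal_struct pointer with the timer id: hash32_ptr() folds the pointer to 32 bits, the id nr is XORed in, and hash_32() reduces the result to the table's bit width. A tiny sketch of the same recipe against a hypothetical table:

    #include <linux/hash.h>
    #include <linux/hashtable.h>

    static DEFINE_HASHTABLE(example_table, 9);      /* 2^9 buckets, purely illustrative */

    static unsigned int example_hash(const void *owner, unsigned int nr)
    {
            /* pointer folded to 32 bits, mixed with nr, reduced to HASH_BITS() bits */
            return hash_32(hash32_ptr(owner) ^ nr, HASH_BITS(example_table));
    }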
|