/kernel/
pid.c
     55  .nr = 0,
    153  idr_remove(&ns->idr, upid->nr);  in free_pid()
    164  int i, nr;  in alloc_pid() (local)
    193  nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,  in alloc_pid()
    198  if (nr < 0) {  in alloc_pid()
    199  retval = (nr == -ENOSPC) ? -EAGAIN : nr;  in alloc_pid()
    203  pid->numbers[i].nr = nr;  in alloc_pid()
    226  idr_replace(&upid->ns->idr, pid, upid->nr);  in alloc_pid()
    241  idr_remove(&upid->ns->idr, upid->nr);  in alloc_pid()
    261  struct pid *find_pid_ns(int nr, struct pid_namespace *ns)  in find_pid_ns() (argument)
    [all …]
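The alloc_pid() hits above show each level's pid number being handed out by idr_alloc_cyclic() and stored in pid->numbers[i].nr, with free_pid() removing it again. A minimal sketch of the matching lookup side, assuming only find_pid_ns() from the listing plus pid_task()/get_task_struct(); the helper name lookup_task_in_ns() is ours:

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

/* Translate a numeric id, as seen inside ns, back to a task.  find_pid_ns()
 * performs the idr lookup that alloc_pid() populated. */
static struct task_struct *lookup_task_in_ns(int nr, struct pid_namespace *ns)
{
        struct task_struct *task;

        rcu_read_lock();
        task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
        if (task)
                get_task_struct(task);  /* caller must put_task_struct() */
        rcu_read_unlock();

        return task;
}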
softirq.c
    423  inline void raise_softirq_irqoff(unsigned int nr)  in raise_softirq_irqoff() (argument)
    425  __raise_softirq_irqoff(nr);  in raise_softirq_irqoff()
    440  void raise_softirq(unsigned int nr)  in raise_softirq() (argument)
    445  raise_softirq_irqoff(nr);  in raise_softirq()
    449  void __raise_softirq_irqoff(unsigned int nr)  in __raise_softirq_irqoff() (argument)
    451  trace_softirq_raise(nr);  in __raise_softirq_irqoff()
    452  or_softirq_pending(1UL << nr);  in __raise_softirq_irqoff()
    455  void open_softirq(int nr, void (*action)(struct softirq_action *))  in open_softirq() (argument)
    457  softirq_vec[nr].action = action;  in open_softirq()
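raise_softirq() is the irq-safe wrapper around __raise_softirq_irqoff(), which sets bit nr in the pending mask, and open_softirq() installs the handler that runs for that bit. A hedged sketch of how built-in code wires the two together; open_softirq() is not exported to modules, and MY_SOFTIRQ stands in for a vector that a real user would add to the enum in <linux/interrupt.h>:

#include <linux/init.h>
#include <linux/interrupt.h>

/* Placeholder vector number, only so the sketch compiles. */
#define MY_SOFTIRQ TASKLET_SOFTIRQ

static void my_softirq_action(struct softirq_action *h)
{
        /* runs in softirq context with interrupts enabled */
}

static int __init my_subsys_init(void)
{
        /* softirq_vec[MY_SOFTIRQ].action = my_softirq_action */
        open_softirq(MY_SOFTIRQ, my_softirq_action);
        return 0;
}

/* Registered elsewhere with request_irq(); shown only to place raise_softirq(). */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        raise_softirq(MY_SOFTIRQ);      /* or_softirq_pending(1UL << MY_SOFTIRQ) */
        return IRQ_HANDLED;
}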
pid_namespace.c
    183  int nr;  in zap_pid_ns_processes() (local)
    216  nr = 2;  in zap_pid_ns_processes()
    217  idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {  in zap_pid_ns_processes()
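zap_pid_ns_processes() starts the idr walk at nr == 2 so it skips the namespace's own init (pid 1). The same iteration pattern in isolation, as a sketch; locking is reduced to rcu_read_lock(), whereas the real function also holds tasklist_lock and signals each task:

#include <linux/idr.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/rcupdate.h>

static void walk_ns_pids(struct pid_namespace *pid_ns)
{
        struct pid *pid;
        int nr = 2;     /* nr 1 is the namespace's init; start past it */

        rcu_read_lock();
        idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
                /* act on every remaining pid, e.g. signal its tasks */
        }
        rcu_read_unlock();
}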
ptrace.c
    723  if (arg.nr < 0)  in ptrace_peek_siginfo()
    735  for (i = 0; i < arg.nr; ) {  in ptrace_peek_siginfo()
    916  info->entry.nr = syscall_get_nr(child, regs);  in ptrace_get_syscall_info_entry()
fork.c
   2349  long nr;  in _do_fork() (local)
   2384  nr = pid_vnr(pid);  in _do_fork()
   2387  put_user(nr, args->parent_tid);  in _do_fork()
   2407  return nr;  in _do_fork()
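The nr that _do_fork() returns, and stores through args->parent_tid, is the new task's pid translated into the caller's pid namespace. A one-line sketch of that translation; the wrapper name is ours and simply restates what pid_vnr() expands to:

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/sched.h>

/* "pid number as current would see it", i.e. pid_vnr(pid). */
static long pid_seen_by_caller(struct pid *pid)
{
        return pid_nr_ns(pid, task_active_pid_ns(current));
}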
seccomp.c
    150  sd->nr = syscall_get_nr(task, regs);  in populate_seccomp_data()
    929  this_syscall = sd ? sd->nr :  in __secure_computing()
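populate_seccomp_data() here and ptrace_get_syscall_info_entry() above both obtain the in-progress syscall number from the arch helper syscall_get_nr(). A minimal sketch of that call for an arbitrary task (valid for current, or for a stopped tracee whose saved registers are stable); fetch_syscall_nr() is our name:

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/syscall.h>

/* Returns the syscall number the task is executing, or -1 when it is not
 * inside a syscall; the same convention seccomp and ptrace rely on. */
static int fetch_syscall_nr(struct task_struct *task)
{
        struct pt_regs *regs = task_pt_regs(task);

        return syscall_get_nr(task, regs);
}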
/kernel/locking/
lockdep_proc.c
    360  unsigned long nl, nr;  in lock_stat_cmp() (local)
    362  nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;  in lock_stat_cmp()
    363  nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;  in lock_stat_cmp()
    365  return nr - nl;  in lock_stat_cmp()
    379  static void snprint_time(char *buf, size_t bufsiz, s64 nr)  in snprint_time() (argument)
    384  nr += 5; /* for display rounding */  in snprint_time()
    385  div = div_s64_rem(nr, 1000, &rem);  in snprint_time()
    399  seq_printf(m, "%14lu", lt->nr);  in seq_lock_time()
    403  seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);  in seq_lock_time()
    453  if (stats->write_holdtime.nr) {  in seq_stats()
    [all …]
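snprint_time() prints a time value scaled down by 1000 with two decimal places; the nr += 5 rounds the digit that is about to be discarded. The same rounding written out as a standalone sketch (buffer handling and the helper name are ours):

#include <linux/kernel.h>
#include <linux/math64.h>

/* Print nr/1000 with two decimals, rounding half up on the dropped digit. */
static void print_scaled(char *buf, size_t len, s64 nr)
{
        s64 whole;
        s32 rem;

        nr += 5;
        whole = div_s64_rem(nr, 1000, &rem);
        snprintf(buf, len, "%lld.%02d", (long long)whole, (int)rem / 10);
}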
lockdep.c
    207  if (time < lt->min || !lt->nr)  in lock_time_inc()
    211  lt->nr++;  in lock_time_inc()
    216  if (!src->nr)  in lock_time_add()
    222  if (src->min < dst->min || !dst->nr)  in lock_time_add()
    226  dst->nr += src->nr;  in lock_time_add()
   1408  unsigned long nr;  in mark_lock_accessed() (local)
   1410  nr = lock - list_entries;  in mark_lock_accessed()
   1411  WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */  in mark_lock_accessed()
   1418  unsigned long nr;  in lock_accessed() (local)
   1420  nr = lock - list_entries;  in lock_accessed()
    [all …]
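lock_time_inc() and lock_time_add() maintain (min, max, total, nr) running statistics, and the !nr tests let the first recorded sample define min. The merge step as a standalone sketch; struct lock_time mirrors the real lockdep type, but the helper name is ours:

#include <linux/types.h>

struct lock_time {              /* same layout lockdep uses */
        s64           min;
        s64           max;
        s64           total;
        unsigned long nr;
};

static void lock_time_merge(struct lock_time *dst, const struct lock_time *src)
{
        if (!src->nr)                   /* nothing was ever sampled on src */
                return;

        if (src->max > dst->max)
                dst->max = src->max;
        if (src->min < dst->min || !dst->nr)
                dst->min = src->min;    /* also taken when dst is still empty */

        dst->total += src->total;
        dst->nr    += src->nr;
}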
/kernel/trace/
trace_syscalls.c
    102  static struct syscall_metadata *syscall_nr_to_meta(int nr)  in syscall_nr_to_meta() (argument)
    104  if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)  in syscall_nr_to_meta()
    107  return syscalls_metadata[nr];  in syscall_nr_to_meta()
    133  syscall = trace->nr;  in print_syscall_enter()
    179  syscall = trace->nr;  in print_syscall_exit()
    276  ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),  in syscall_enter_define_fields()
    297  ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),  in syscall_exit_define_fields()
    350  entry->nr = syscall_nr;  in ftrace_syscall_enter()
    397  entry->nr = syscall_nr;  in ftrace_syscall_exit()
    529  unsigned long __init __weak arch_syscall_addr(int nr)  in arch_syscall_addr() (argument)
    [all …]
trace.h
    100  int nr;  (member)
    106  int nr;  (member)
/kernel/events/
hw_breakpoint.c
    141  int nr;  in fetch_bp_busy_slots() (local)
    143  nr = info->cpu_pinned;  in fetch_bp_busy_slots()
    145  nr += max_task_bp_pinned(cpu, type);  in fetch_bp_busy_slots()
    147  nr += task_bp_pinned(cpu, bp, type);  in fetch_bp_busy_slots()
    149  if (nr > slots->pinned)  in fetch_bp_busy_slots()
    150  slots->pinned = nr;  in fetch_bp_busy_slots()
    152  nr = info->flexible;  in fetch_bp_busy_slots()
    153  if (nr > slots->flexible)  in fetch_bp_busy_slots()
    154  slots->flexible = nr;  in fetch_bp_busy_slots()
callchain.c
    194  ctx.nr = entry->nr = init_nr;  in get_perf_callchain()
ring_buffer.c
    847  int i, nr;  in rb_free_work() (local)
    850  nr = data_page_nr(rb);  in rb_free_work()
    854  for (i = 0; i <= nr; i++)  in rb_free_work()
core.c
   1323  u32 nr;  in perf_event_pid_type() (local)
   1330  nr = __task_pid_nr_ns(p, type, event->ns);  in perf_event_pid_type()
   1332  if (!nr && !pid_alive(p))  in perf_event_pid_type()
   1333  nr = -1;  in perf_event_pid_type()
   1334  return nr;  in perf_event_pid_type()
   1707  int nr = 1;  in __perf_event_read_size() (local)
   1719  nr += nr_siblings;  in __perf_event_read_size()
   1723  size += entry * nr;  in __perf_event_read_size()
   6404  size += data->callchain->nr;  in perf_output_sample()
   6446  size = data->br_stack->nr  in perf_output_sample()
    [all …]
/kernel/bpf/
stackmap.c
     21  u32 nr;  (member)
    376  trace_nr = trace->nr - init_nr;  in BPF_CALL_3()
    400  new_bucket->nr = trace_nr;  in BPF_CALL_3()
    405  if (hash_matches && bucket->nr == trace_nr &&  in BPF_CALL_3()
    415  if (hash_matches && bucket->nr == trace_nr &&  in BPF_CALL_3()
    429  new_bucket->nr = trace_nr;  in BPF_CALL_3()
    479  trace_nr = trace->nr - init_nr;  in BPF_CALL_4()
    533  trace_len = bucket->nr * stack_map_data_size(map);  in bpf_stackmap_copy()
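The BPF_CALL_3 body here implements the bpf_get_stackid() helper: it captures a callchain, keeps trace->nr - init_nr frames, and files them under a hash bucket whose ->nr is compared on later hits. A hedged sketch of the user-visible side, a small libbpf-style program feeding such a map; the attach point, map sizing and section names are illustrative:

#include <linux/bpf.h>
#include <linux/perf_event.h>           /* PERF_MAX_STACK_DEPTH */
#include <linux/ptrace.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_STACK_TRACE);
        __uint(max_entries, 1024);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
} stack_traces SEC(".maps");

SEC("kprobe/submit_bio")
int record_stack(struct pt_regs *ctx)
{
        /* The returned id names a bucket; on the kernel side, bucket->nr is
         * the number of frames captured for this stack. */
        long id = bpf_get_stackid(ctx, &stack_traces, 0);

        /* a real program would store id in another map keyed by pid, etc. */
        return id < 0;
}

char LICENSE[] SEC("license") = "GPL";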
/kernel/rcu/
rcuperf.c
    573  int nr;  in compute_real() (local)
    576  nr = n;  in compute_real()
    578  nr = num_online_cpus() + 1 + n;  in compute_real()
    579  if (nr <= 0)  in compute_real()
    580  nr = 1;  in compute_real()
    582  return nr;  in compute_real()
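compute_real() interprets a module parameter: a non-negative n is used as-is, while a negative n means num_online_cpus() + 1 + n, clamped to at least 1. The same convention as a tiny sketch (the function name is ours):

#include <linux/cpumask.h>

static int compute_real_sketch(int n)
{
        int nr = n;

        if (n < 0) {
                nr = num_online_cpus() + 1 + n;   /* n == -1 means "all CPUs" */
                if (nr <= 0)
                        nr = 1;
        }
        return nr;
}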
/kernel/irq/
irqdesc.c
    511  static int irq_expand_nr_irqs(unsigned int nr)  in irq_expand_nr_irqs() (argument)
    513  if (nr > IRQ_BITMAP_BITS)  in irq_expand_nr_irqs()
    515  nr_irqs = nr;  in irq_expand_nr_irqs()
    612  static int irq_expand_nr_irqs(unsigned int nr)  in irq_expand_nr_irqs() (argument)
internals.h
    104  extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
/kernel/sched/
wait.c
    149  void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)  in __wake_up_locked() (argument)
    151  __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);  in __wake_up_locked()
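__wake_up_locked() is the wake-up variant for callers that already hold wq_head->lock, and nr bounds how many exclusive waiters are woken. A minimal sketch of that contract; the wait queue and the state update are ours:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static void signal_one_waiter(void)
{
        unsigned long flags;

        spin_lock_irqsave(&my_wq.lock, flags);
        /* ... update whatever condition the sleepers re-test ... */
        __wake_up_locked(&my_wq, TASK_NORMAL, 1);   /* nr == 1: one exclusive waiter */
        spin_unlock_irqrestore(&my_wq.lock, flags);
}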
fair.c
   3476  if (cfs_rq->removed.nr) {  in update_cfs_rq_load_avg()
   3484  cfs_rq->removed.nr = 0;  in update_cfs_rq_load_avg()
   3676  ++cfs_rq->removed.nr;  in remove_entity_load_avg()
   5955  int cpu, nr = INT_MAX, si_cpu = -1;  in select_idle_cpu() (local)
   5974  nr = div_u64(span_avg, avg_cost);  in select_idle_cpu()
   5976  nr = 4;  in select_idle_cpu()
   5982  if (!--nr)  in select_idle_cpu()
sched.h
    530  int nr;  (member)
/kernel/time/
posix-timers.c
    112  static int hash(struct signal_struct *sig, unsigned int nr)  in hash() (argument)
    114  return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));  in hash()
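hash() folds the owning signal_struct pointer and the timer id into a bucket index with hash_32()/hash32_ptr(). For context, a hedged sketch of the lookup that index serves, modelled on the hlist walk in the same file; the table size, helper name and locking comment are illustrative, while the k_itimer field names are the real ones:

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/posix-timers.h>
#include <linux/rculist.h>

#define TIMER_HASH_BITS 9       /* illustrative size */
static DEFINE_HASHTABLE(posix_timers_hashtable, TIMER_HASH_BITS);

static int hash(struct signal_struct *sig, unsigned int nr)
{
        return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}

/* Caller holds rcu_read_lock() or the table's lock. */
static struct k_itimer *find_timer(struct signal_struct *sig, unsigned int nr)
{
        struct hlist_head *head = &posix_timers_hashtable[hash(sig, nr)];
        struct k_itimer *timer;

        hlist_for_each_entry_rcu(timer, head, t_hash) {
                if (timer->it_signal == sig && timer->it_id == nr)
                        return timer;
        }
        return NULL;
}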