/kernel/
signal.c:
  3345  int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)  in copy_siginfo_to_user() argument
  3348      if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))  in copy_siginfo_to_user()
  3356          const siginfo_t __user *from)  in post_copy_siginfo_from_user() argument
  3359      char __user *expansion = si_expansion(from);  in post_copy_siginfo_from_user()
  3379          const siginfo_t __user *from)  in __copy_siginfo_from_user() argument
  3381      if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))  in __copy_siginfo_from_user()
  3384      return post_copy_siginfo_from_user(to, from);  in __copy_siginfo_from_user()
  3387  int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)  in copy_siginfo_from_user() argument
  3389      if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))  in copy_siginfo_from_user()
  3391      return post_copy_siginfo_from_user(to, from);  in copy_siginfo_from_user()
  [all …]
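These helpers move signal payloads across the user/kernel boundary: struct kernel_siginfo is smaller than the userspace siginfo_t, so the *_from_user paths also verify that the unread tail (the "expansion" area) of the user buffer is all zeroes. A minimal sketch of a consumer follows; the example_* names are hypothetical, and only copy_siginfo_from_user() and its error codes come from the listing above.

    /* Hypothetical caller: pull a user-space siginfo_t into the
     * fixed-size kernel representation before queueing a signal. */
    static int example_queue_user_siginfo(pid_t pid, int sig,
                                          const siginfo_t __user *uinfo)
    {
            kernel_siginfo_t info;
            int ret;

            /* Copies sizeof(struct kernel_siginfo) bytes, then checks
             * that the expansion bytes of *uinfo are all zero. */
            ret = copy_siginfo_from_user(&info, uinfo);
            if (ret)
                    return ret;  /* -EFAULT, or -E2BIG for a nonzero tail */

            return example_deliver_signal(pid, sig, &info);  /* hypothetical */
    }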
softirq.c:
  1065  unsigned int __weak arch_dynirq_lower_bound(unsigned int from)  in arch_dynirq_lower_bound() argument
  1067      return from;  in arch_dynirq_lower_bound()
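The __weak definition is an identity fallback; an architecture can override it so that dynamically allocated interrupt numbers stay above a reserved low range (x86 does this to keep dynamic IRQs out of the legacy GSI space). A sketch of such an override, with a made-up constant:

    /* Illustrative override: never hand out descriptors below a
     * platform-reserved range.  EXAMPLE_RESERVED_IRQS is hypothetical. */
    #define EXAMPLE_RESERVED_IRQS 64U

    unsigned int arch_dynirq_lower_bound(unsigned int from)
    {
            return max(from, EXAMPLE_RESERVED_IRQS);
    }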
/kernel/irq/
devres.c:
   150      unsigned int from;  member
   158      irq_free_descs(this->from, this->cnt);  in devm_irq_desc_release()
   178  int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,  in __devm_irq_alloc_descs() argument
   189      base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity);  in __devm_irq_alloc_descs()
   195      dr->from = base;  in __devm_irq_alloc_descs()
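This is the device-managed variant: the allocated range is recorded in a devres node so devm_irq_desc_release() frees it automatically when the device detaches. A minimal probe sketch, assuming the devm_irq_alloc_descs() convenience wrapper from <linux/irq.h>; the driver itself is hypothetical:

    static int example_probe(struct platform_device *pdev)
    {
            int base;

            /* irq = -1 lets the core pick the range, searching from 0;
             * the four descriptors are released on driver detach. */
            base = devm_irq_alloc_descs(&pdev->dev, -1, 0, 4,
                                        dev_to_node(&pdev->dev));
            if (base < 0)
                    return base;

            /* base .. base + 3 are now valid Linux irq numbers. */
            return 0;
    }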
irqdesc.c:
   758  void irq_free_descs(unsigned int from, unsigned int cnt)  in irq_free_descs() argument
   762      if (from >= nr_irqs || (from + cnt) > nr_irqs)  in irq_free_descs()
   767      free_desc(from + i);  in irq_free_descs()
   769      bitmap_clear(allocated_irqs, from, cnt);  in irq_free_descs()
   788  __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,  in __irq_alloc_descs() argument
   797      if (from > irq)  in __irq_alloc_descs()
   799      from = irq;  in __irq_alloc_descs()
   806      from = arch_dynirq_lower_bound(from);  in __irq_alloc_descs()
   812      from, cnt, 0);  in __irq_alloc_descs()
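In __irq_alloc_descs(), a non-negative `irq` requests a specific first interrupt number, while `from` only sets the lower bound of the bitmap search; the excerpt shows `from` being clamped to `irq` and then raised by arch_dynirq_lower_bound(). Both call styles, sketched with the irq_alloc_descs() wrapper from <linux/irq.h>:

    /* Sketch: allocate and release descriptor ranges. */
    static int example_alloc_irq_ranges(void)
    {
            /* Any free run of 8 descriptors. */
            int start = irq_alloc_descs(-1, 0, 8, NUMA_NO_NODE);

            if (start < 0)
                    return start;
            irq_free_descs(start, 8);       /* paired release */

            /* Demand irq 16 exactly as the first of 2 descriptors;
             * fails with -EEXIST if the range is already taken. */
            return irq_alloc_descs(16, 16, 2, NUMA_NO_NODE);
    }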
/kernel/events/
ring_buffer.c:
   575      unsigned long from, unsigned long to)  in perf_output_copy_aux() argument
   581      from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;  in perf_output_copy_aux()
   585      tocopy = PAGE_SIZE - offset_in_page(from);  in perf_output_copy_aux()
   586      if (to > from)  in perf_output_copy_aux()
   587      tocopy = min(tocopy, to - from);  in perf_output_copy_aux()
   591      addr = rb->aux_pages[from >> PAGE_SHIFT];  in perf_output_copy_aux()
   592      addr += offset_in_page(from);  in perf_output_copy_aux()
   599      from += tocopy;  in perf_output_copy_aux()
   600      from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;  in perf_output_copy_aux()
   601      } while (to != from);  in perf_output_copy_aux()
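The AUX area is a power-of-two number of pages that are not virtually contiguous, so the kernel copies page by page and wraps offsets with a mask. The wraparound logic on its own, as a flat-buffer C sketch (illustrative, not the perf code itself):

    #include <stddef.h>
    #include <string.h>

    /* Copy ring contents between offsets [from, to) into dst.  ring_size
     * must be a power of two; from == to copies one full ring. */
    static void ring_copy(char *dst, const char *ring, size_t ring_size,
                          size_t from, size_t to)
    {
            from &= ring_size - 1;
            to &= ring_size - 1;

            do {
                    /* At most up to the wrap point... */
                    size_t chunk = ring_size - from;

                    /* ...and no further than the end offset. */
                    if (to > from && to - from < chunk)
                            chunk = to - from;

                    memcpy(dst, ring + from, chunk);
                    dst += chunk;
                    from = (from + chunk) & (ring_size - 1);
            } while (from != to);
    }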
hw_breakpoint.c:
   780      struct perf_event_attr *from)  in hw_breakpoint_copy_attr() argument
   782      to->bp_addr = from->bp_addr;  in hw_breakpoint_copy_attr()
   783      to->bp_type = from->bp_type;  in hw_breakpoint_copy_attr()
   784      to->bp_len = from->bp_len;  in hw_breakpoint_copy_attr()
   785      to->disabled = from->disabled;  in hw_breakpoint_copy_attr()
/kernel/printk/
printk_ringbuffer.h:
   355  #define prb_for_each_record(from, rb, s, r) \  argument
   356      for ((s) = from; prb_read_valid(rb, s, r); (s) = (r)->info->seq + 1)
   373  #define prb_for_each_info(from, rb, s, i, lc) \  argument
   374      for ((s) = from; prb_read_valid_info(rb, s, i, lc); (s) = (i)->seq + 1)
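Both macros drive lockless readers by sequence number: every successful read reports the sequence it actually returned, and the cursor resumes at seq + 1, so records overwritten during the walk are skipped rather than re-read. A usage sketch in the style of the printk.c readers; the function name and buffer size are assumptions:

    /* Illustrative dump loop; rb stands in for the ringbuffer the
     * caller owns (printk.c uses its global one). */
    static void example_dump_all_records(struct printk_ringbuffer *rb)
    {
            struct printk_info info;
            struct printk_record r;
            static char text[1024];
            u64 seq;

            prb_rec_init_rd(&r, &info, text, sizeof(text));

            /* Visit every surviving record, starting at sequence 0. */
            prb_for_each_record(0, rb, seq, &r) {
                    /* r.text_buf holds info.text_len bytes of message
                     * text for the record with sequence number info.seq. */
            }
    }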
/kernel/bpf/
core.c:
  1218  static int bpf_jit_blind_insn(const struct bpf_insn *from,  in bpf_jit_blind_insn() argument
  1247      if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)  in bpf_jit_blind_insn()
  1250      if (from->imm == 0 &&  in bpf_jit_blind_insn()
  1251          (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||  in bpf_jit_blind_insn()
  1252          from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {  in bpf_jit_blind_insn()
  1253      *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);  in bpf_jit_blind_insn()
  1257      switch (from->code) {  in bpf_jit_blind_insn()
  1267      *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);  in bpf_jit_blind_insn()
  1269      *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);  in bpf_jit_blind_insn()
  1281      *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);  in bpf_jit_blind_insn()
  [all …]
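This is constant blinding against JIT spraying: an attacker-chosen 32-bit immediate never reaches the JIT verbatim. Only lines containing `from` appear above; the step emitted between lines 1267 and 1269, which XORs BPF_REG_AX with the same imm_rnd, undoes the masking before the real ALU op consumes the register. The identity behind the rewrite, as a standalone C check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t imm = 0xdeadbeef;  /* attacker-controlled constant */
            uint32_t rnd = 0x13579bdf;  /* random blinding value        */

            uint32_t ax = imm ^ rnd;    /* BPF_MOV AX, imm ^ rnd */
            ax ^= rnd;                  /* BPF_XOR AX, rnd       */

            /* The ALU op then consumes AX: no instruction in the
             * rewritten stream encodes imm itself. */
            assert(ax == imm);
            return 0;
    }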
dispatcher.c:
   132  void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,  in bpf_dispatcher_change_prog() argument
   138      if (from == to)  in bpf_dispatcher_change_prog()
   159      changed |= bpf_dispatcher_remove_prog(d, from);  in bpf_dispatcher_change_prog()
bpf_iter.c:
   116      err = copy_to_user(buf, seq->buf + seq->from, n);  in bpf_seq_read()
   122      seq->from += n;  in bpf_seq_read()
   127      seq->from = 0;  in bpf_seq_read()
   232      seq->from = n;  in bpf_seq_read()
/kernel/time/
tick-common.c:
   197      int from = tick_do_timer_boot_cpu;  in tick_take_do_timer_from_boot() local
   199      if (from >= 0 && from != cpu)  in tick_take_do_timer_from_boot()
   200      smp_call_function_single(from, giveup_do_timer, &cpu, 1);  in tick_take_do_timer_from_boot()
Kconfig:
    16  # Architecture has extra clocksource init called from registration
    51  # Select to handle posix CPU timers from task_work
    52  # and not from the timer interrupt context
clocksource.c:
    47  clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)  in clocks_calc_mult_shift() argument
    56      tmp = ((u64)maxsec * from) >> 32;  in clocks_calc_mult_shift()
    68      tmp += from / 2;  in clocks_calc_mult_shift()
    69      do_div(tmp, from);  in clocks_calc_mult_shift()
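The helper picks a (mult, shift) pair so that rate conversion reduces to (value * mult) >> shift, with maxsec bounding the shift so the 64-bit multiply cannot overflow. A worked example for a 24 MHz counter converted to nanoseconds, assuming the helper settled on shift = 24:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t from = 24000000;   /* counter frequency: 24 MHz */
            uint32_t to = 1000000000;   /* target unit: nanoseconds  */
            uint32_t shift = 24;        /* assumed result            */

            /* Same rounding as the kernel: add from/2 before dividing. */
            uint32_t mult = (uint32_t)((((uint64_t)to << shift) + from / 2) / from);

            uint64_t cycles = 24000;    /* exactly 1 ms worth of ticks */
            uint64_t ns = (cycles * (uint64_t)mult) >> shift;

            printf("mult=%u -> %llu ns\n", mult, (unsigned long long)ns);
            /* prints: mult=699050667 -> 1000000 ns */
            return 0;
    }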
/kernel/rcu/
Kconfig:
    37      designed for UP systems from which real-time response
    48      side-effect of preventing "make oldconfig" from asking you all
   206      This option also prevents heavy loads from blocking RCU
   240  bool "Offload RCU callback processing from boot-selected CPUs"
   253      This option offloads callback invocation from the set of CPUs
   259      from running on the specified CPUs, but (1) the kthreads may be
   268  bool "Offload RCU callback processing from all CPUs by default"
   272      Use this option to offload callback processing from all CPUs
   281  bool "Offload RCU callback from real-time kthread"
   339      Default value of the minimum time in milliseconds from the start of boot
   [all …]
rcu_segcblist.c:
   122  static void rcu_segcblist_move_seglen(struct rcu_segcblist *rsclp, int from, int to)  in rcu_segcblist_move_seglen() argument
   126      if (from == to)  in rcu_segcblist_move_seglen()
   129      len = rcu_segcblist_get_seglen(rsclp, from);  in rcu_segcblist_move_seglen()
   134      rcu_segcblist_set_seglen(rsclp, from, 0);  in rcu_segcblist_move_seglen()
/kernel/power/
Kconfig:
    29      of suspend, or they are content with invoking sync() from
    55      from <http://suspend.sf.net>.
    65      have it detect the saved image, restore memory state from it, and
    90      Say N if no snapshotting from userspace is needed, this also
   159      Only platforms, such as Android, that implement opportunistic sleep from
   210      fields of device objects from user space. If you are not a kernel
   252      functions from <linux/resume-trace.h> as well as the
   290      and the Battery Powered Linux mini-HOWTO, available from
   349      from which subsystems can access the energy models.
/kernel/gcov/
Kconfig:
    27      To exclude files from being profiled even when CONFIG_GCOV_PROFILE_ALL
    52      larger and run slower. Also be sure to exclude files from profiling
/kernel/locking/
test-ww_mutex.c:
   126      const char *from = trylock ? "trylock" : "lock";  in test_aa() local
   146      pr_err("%s: trylocked itself without context from %s!\n", __func__, from);  in test_aa()
   153      pr_err("%s: trylocked itself with context from %s!\n", __func__, from);  in test_aa()
   162      __func__, ret, from);  in test_aa()
/kernel/cgroup/
cgroup-v1.c:
    57  int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)  in cgroup_attach_task_all() argument
    68      from_cgrp = task_cgroup_from_root(from, root);  in cgroup_attach_task_all()
    95  int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)  in cgroup_transfer_tasks()
   116      list_for_each_entry(link, &from->cset_links, cset_link)  in cgroup_transfer_tasks()
   129      css_task_iter_start(&from->self, 0, &it);  in cgroup_transfer_tasks()
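cgroup_attach_task_all() moves `tsk` into `from`'s cgroup in every mounted hierarchy; the classic consumer is a subsystem that wants its worker kthread charged to the user task that spawned it (the vhost driver has used this pattern). A sketch; example_worker_fn and the helper name are hypothetical:

    /* Illustrative helper: spawn a worker and charge it to the cgroups
     * of the current (requesting) task. */
    static struct task_struct *example_spawn_worker(void)
    {
            struct task_struct *worker;

            worker = kthread_create(example_worker_fn, NULL, "example-worker");
            if (IS_ERR(worker))
                    return worker;

            cgroup_attach_task_all(current, worker);
            wake_up_process(worker);
            return worker;
    }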
/kernel/kcsan/
.kunitconfig:
    21  # This prevents the test from timing out on many setups. Feel free to remove
/kernel/bpf/preload/
Kconfig:
    10  # The dependency on !COMPILE_TEST prevents it from being enabled
/kernel/configs/
xen.config:
    16  # pretty useless systems starting from allnoconfig
/kernel/trace/
Kconfig:
    48      If this is set, then arguments and stack can be found from
   373      to be scheduled in, starting from the point it has woken up.
   431      without any interference from the operating system level, the
   469      events can be used to trace the source of interference from NMI,
   480      debugging and reverse engineering. It is called from the ioremap
   596      conditions. This prevents the tracers themselves from being
   627      support tools needed), fetch the blktrace tools from:
   659  bool "Do NOT protect notrace function from kprobe events"
   668      functions are protected from kprobe-events to prevent an infinite
   767      used to combine data from other trace events or in fact any
/kernel/module/
Kconfig:
    76      This enables module versioning for exported symbols also from
   296      This option allows for unused exported symbols to be dropped from
   307      By default, all unused exported symbols will be un-exported from the
/kernel/futex/
core.c:
   462  int futex_get_value_locked(u32 *dest, u32 __user *from)  in futex_get_value_locked() argument
   467      ret = __get_user(*dest, from);  in futex_get_value_locked()
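The _locked suffix means the caller already holds a futex hash-bucket spinlock: the helper reads the user futex word with page faults disabled, so __get_user() fails fast with -EFAULT instead of sleeping. The usual caller pattern, sketched with a hypothetical wrapper:

    /* Illustrative caller: uaddr is the user-space futex word and the
     * hash-bucket lock is already held. */
    static int example_read_futex_word(u32 __user *uaddr, u32 *uval)
    {
            int ret = futex_get_value_locked(uval, uaddr);

            if (ret) {
                    /* Must not fault while holding the bucket lock: the
                     * real callers drop the lock, fault the page in with
                     * a normal get_user(), and retry. */
            }
            return ret;
    }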