/kernel/bpf/ |
D | tnum.c |
     37  struct tnum tnum_lshift(struct tnum a, u8 shift)  in tnum_lshift() argument
     39          return TNUM(a.value << shift, a.mask << shift);  in tnum_lshift()
     42  struct tnum tnum_rshift(struct tnum a, u8 shift)  in tnum_rshift() argument
     44          return TNUM(a.value >> shift, a.mask >> shift);  in tnum_rshift()
     47  struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness)  in tnum_arshift() argument
     55          return TNUM((u32)(((s32)a.value) >> min_shift),  in tnum_arshift()
     56                      (u32)(((s32)a.mask) >> min_shift));  in tnum_arshift()
     58          return TNUM((s64)a.value >> min_shift,  in tnum_arshift()
     59                      (s64)a.mask >> min_shift);  in tnum_arshift()
     62  struct tnum tnum_add(struct tnum a, struct tnum b)  in tnum_add() argument
    [all …]
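
The tnum ("tristate number") helpers above track partially known values for the BPF verifier: value holds the bits known to be 1, mask marks the bits that are unknown (a bit may be set in at most one of the two), and shifting both fields together shifts that knowledge. A minimal user-space sketch of the left-shift helper, re-declaring the struct and TNUM() initializer with stdint types (only the shift logic mirrors the kernel lines above):

#include <stdint.h>
#include <stdio.h>

struct tnum { uint64_t value; uint64_t mask; };
#define TNUM(v, m) ((struct tnum){ .value = (v), .mask = (m) })

/* Known and unknown bits shift together; vacated low bits become known 0. */
static struct tnum tnum_lshift(struct tnum a, uint8_t shift)
{
        return TNUM(a.value << shift, a.mask << shift);
}

int main(void)
{
        struct tnum t = TNUM(0x4, 0xa);  /* 0bx1x0: bit 2 known 1, bits 1 and 3 unknown */
        struct tnum s = tnum_lshift(t, 1);

        printf("value=%#llx mask=%#llx\n",
               (unsigned long long)s.value, (unsigned long long)s.mask);
        return 0;
}

This prints value=0x8 mask=0x14: the unknown bits move in step with the known ones.
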
|
D | Kconfig |
      8  # flavour. Only one of the two can be selected for a specific arch since
     44    BPF programs are normally handled by a BPF interpreter. This option
     45    allows the kernel to generate native code when a program is loaded
|
/kernel/trace/ |
D | tracing_map.c |
    130          char *a = val_a;  in tracing_map_cmp_string() local
    133          return strcmp(a, b);  in tracing_map_cmp_string()
    143          u64 a = atomic64_read((atomic64_t *)val_a);  in tracing_map_cmp_atomic64() local
    146          return (a > b) ? 1 : ((a < b) ? -1 : 0);  in tracing_map_cmp_atomic64()
    152          type a = (type)(*(u64 *)val_a);                 \
    155          return (a > b) ? 1 : ((a < b) ? -1 : 0);        \
    287  static void tracing_map_array_clear(struct tracing_map_array *a)  in tracing_map_array_clear() argument
    291          if (!a->pages)  in tracing_map_array_clear()
    294          for (i = 0; i < a->n_pages; i++)  in tracing_map_array_clear()
    295                  memset(a->pages[i], 0, PAGE_SIZE);  in tracing_map_array_clear()
    [all …]
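
Lines 152 and 155 are the body of a macro that stamps out one three-way comparator per element type: tracing_map stores every numeric field as a u64 and casts it back to the declared type before comparing, so signed and unsigned fields each get a correct comparator. A sketch of that generator pattern (DEFINE_CMP_FN and the cmp_* names below are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

/* One comparator per type: the value is stored as u64, cast back, compared. */
#define DEFINE_CMP_FN(type)                                     \
static int cmp_##type(void *val_a, void *val_b)                 \
{                                                               \
        type a = (type)(*(u64 *)val_a);                         \
        type b = (type)(*(u64 *)val_b);                         \
                                                                \
        return (a > b) ? 1 : ((a < b) ? -1 : 0);                \
}

DEFINE_CMP_FN(s64)
DEFINE_CMP_FN(u64)

int main(void)
{
        u64 x = (u64)-1, y = 1;  /* -1 when read as s64, huge when read as u64 */

        printf("signed: %d, unsigned: %d\n", cmp_s64(&x, &y), cmp_u64(&x, &y));
        return 0;
}

The type matters: the same bit pattern sorts before 1 as s64 (it reads as -1) but after it as u64.
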
|
D | Kconfig |
     96    Adds a very slight overhead to tracing when enabled.
    174    by using a compiler feature to insert a small, 5-byte No-Operation
    176    sequence is then dynamically patched into a tracer call when
    188    Enable the kernel to trace a function at both its return
    191    draw a call graph for each thread with some information like
    193    address on the current task structure into a stack of calls.
    203    replace them with a No-Op instruction) on boot up. During
    204    compile time, a table is made of all the locations that ftrace
    215    This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
    240    When a 1 is echoed into this file profiling begins, and when a
    [all …]
|
D | trace_events_filter_test.h |
     12          TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
     14          TP_ARGS(a, b, c, d, e, f, g, h),
     17                  __field(int, a)
     28                  __entry->a = a;
     39                  __entry->a, __entry->b, __entry->c, __entry->d,
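
These fragments all belong to one TRACE_EVENT() definition used to exercise the event filter: TP_PROTO()/TP_ARGS() declare the tracepoint signature, __field() lays out the ring-buffer record, TP_fast_assign() fills it at trace time, and TP_printk() formats it for output. Pieced together, the shape is roughly as below (the event name is illustrative, fields c through h are elided, and this is kernel-header code rather than a standalone program):

TRACE_EVENT(filter_test_event,
        TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
        TP_ARGS(a, b, c, d, e, f, g, h),
        TP_STRUCT__entry(
                __field(int, a)
                __field(int, b)
                /* ... c through h follow the same pattern ... */
        ),
        TP_fast_assign(
                __entry->a = a;
                __entry->b = b;
        ),
        TP_printk("a %d, b %d", __entry->a, __entry->b)
);
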
|
D | trace_probe_tmpl.h |
    265          struct probe_arg *a = args + i;  in print_probe_args() local
    267          trace_seq_printf(s, " %s=", a->name);  in print_probe_args()
    268          if (likely(!a->count)) {  in print_probe_args()
    269                  if (!a->type->print(s, data + a->offset, field))  in print_probe_args()
    274                  p = data + a->offset;  in print_probe_args()
    275                  for (j = 0; j < a->count; j++) {  in print_probe_args()
    276                          if (!a->type->print(s, p, field))  in print_probe_args()
    278                          trace_seq_putc(s, j == a->count - 1 ? '}' : ',');  in print_probe_args()
    279                          p += a->type->size;  in print_probe_args()
|
D | trace_branch.c |
    337          const struct ftrace_branch_data *a = p1;  in annotated_branch_stat_cmp() local
    342          percent_a = get_incorrect_percent(a);  in annotated_branch_stat_cmp()
    350          if (a->incorrect < b->incorrect)  in annotated_branch_stat_cmp()
    352          if (a->incorrect > b->incorrect)  in annotated_branch_stat_cmp()
    360          if (a->correct > b->correct)  in annotated_branch_stat_cmp()
    362          if (a->correct < b->correct)  in annotated_branch_stat_cmp()
|
D | trace_hwlat.c |
    159  #define time_sub(a, b)  ((a) - (b))  argument
    160  #define init_time(a, b) (a = b)  argument
    161  #define time_u64(a)     a  argument
|
/kernel/ |
D | Kconfig.preempt |
     15    Select this option if you are building a kernel for a server or
     30    This allows reaction to interactive events by allowing a
     32    is in kernel mode executing a system call. This allows
     36    Select this if you are building a kernel for a desktop system.
     46    all kernel code (that is not executing in a critical section)
     48    permitting a low priority process to be preempted involuntarily
     49    even if it is in kernel mode executing a system call and would
     50    otherwise not be about to reach a natural preemption point.
     53    and a slight runtime overhead to kernel code.
     55    Select this if you are building a kernel for a desktop or
    [all …]
|
D | Kconfig.hz |
     13    a fast response for user interaction and that may experience bus
     14    contention and cacheline bounces as a result of timer interrupts.
     23    100 Hz is a typical choice for servers, SMP and NUMA systems
     30    250 Hz is a good compromise choice allowing server performance
     38    300 Hz is a good compromise choice allowing server performance
|
D | auditfilter.c |
    696  static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)  in audit_compare_rule() argument
    700          if (a->flags != b->flags ||  in audit_compare_rule()
    701              a->pflags != b->pflags ||  in audit_compare_rule()
    702              a->listnr != b->listnr ||  in audit_compare_rule()
    703              a->action != b->action ||  in audit_compare_rule()
    704              a->field_count != b->field_count)  in audit_compare_rule()
    707          for (i = 0; i < a->field_count; i++) {  in audit_compare_rule()
    708                  if (a->fields[i].type != b->fields[i].type ||  in audit_compare_rule()
    709                      a->fields[i].op != b->fields[i].op)  in audit_compare_rule()
    712                  switch(a->fields[i].type) {  in audit_compare_rule()
    [all …]
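
audit_compare_rule() has a common shape for structural equality: reject on the cheap scalar fields first, then walk the per-field arrays, and only then (in the elided switch) compare type-specific payloads. Note that the kernel function returns 1 on the first mismatch and 0 when the rules match; the standalone sketch below keeps the early-exit structure but uses simplified stand-in types and a plain boolean "equal" convention:

#include <stdbool.h>
#include <stdio.h>

struct rule_field { int type; int op; };
struct rule {
        int flags;
        int action;
        int field_count;
        struct rule_field fields[8];
};

/* Cheap scalar checks first, then the per-field walk. */
static bool rule_equal(const struct rule *a, const struct rule *b)
{
        int i;

        if (a->flags != b->flags ||
            a->action != b->action ||
            a->field_count != b->field_count)
                return false;

        for (i = 0; i < a->field_count; i++)
                if (a->fields[i].type != b->fields[i].type ||
                    a->fields[i].op != b->fields[i].op)
                        return false;

        return true;
}

int main(void)
{
        struct rule r1 = { .flags = 1, .action = 2, .field_count = 1,
                           .fields = { { .type = 3, .op = 4 } } };
        struct rule r2 = r1;

        printf("%d\n", rule_equal(&r1, &r2));   /* 1 */
        r2.fields[0].op = 5;
        printf("%d\n", rule_equal(&r1, &r2));   /* 0 */
        return 0;
}
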
|
D | static_call_inline.c |
     67          const struct static_call_site *a = _a;  in static_call_site_cmp() local
     69          const struct static_call_key *key_a = static_call_key(a);  in static_call_site_cmp()
     84          struct static_call_site *a = _a;  in static_call_site_swap() local
     86          struct static_call_site tmp = *a;  in static_call_site_swap()
     88          a->addr = b->addr - delta;  in static_call_site_swap()
     89          a->key = b->key - delta;  in static_call_site_swap()
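
The "- delta" arithmetic exists because static_call_site stores addr and key as offsets relative to the field's own address, which keeps the section position-independent; when sort() swaps two entries, each stored offset must be re-biased by the distance the entry moved. A standalone sketch of swapping one self-relative field (rel_site and the helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Self-relative reference: the stored value is "target minus the address
 * of the field itself". */
struct rel_site { int32_t addr; };

static void *rel_read(const struct rel_site *s)
{
        return (char *)&s->addr + s->addr;
}

static void rel_set(struct rel_site *s, void *target)
{
        s->addr = (int32_t)((char *)target - (char *)&s->addr);
}

/* A swap moves each entry in memory, so each offset is corrected by the
 * distance it travelled; this mirrors static_call_site_swap()'s delta
 * adjustment. */
static void rel_swap(struct rel_site *a, struct rel_site *b)
{
        int32_t delta = (int32_t)((char *)a - (char *)b);
        struct rel_site tmp = *a;

        a->addr = b->addr - delta;
        b->addr = tmp.addr + delta;
}

int main(void)
{
        int x, y;
        struct rel_site sites[2];

        rel_set(&sites[0], &x);
        rel_set(&sites[1], &y);
        rel_swap(&sites[0], &sites[1]);

        /* After the swap, slot 0 refers to y and slot 1 to x. */
        printf("%d %d\n", rel_read(&sites[0]) == (void *)&y,
               rel_read(&sites[1]) == (void *)&x);
        return 0;
}
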
|
D | groups.c |
     78          kgid_t a = *(kgid_t *)_a;  in gid_cmp() local
     81          return gid_gt(a, b) - gid_lt(a, b);  in gid_cmp()
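
gid_gt(a, b) - gid_lt(a, b) is the classic branch-free three-way comparison: each predicate is 0 or 1, so the difference is exactly -1, 0, or 1, and unlike "return a - b" it cannot overflow. The same idiom for plain ints, in qsort() form:

#include <stdio.h>
#include <stdlib.h>

/* (a > b) - (a < b) yields -1, 0, or 1 and never overflows. */
static int int_cmp(const void *_a, const void *_b)
{
        int a = *(const int *)_a;
        int b = *(const int *)_b;

        return (a > b) - (a < b);
}

int main(void)
{
        int v[] = { 3, -1, 2 };

        qsort(v, 3, sizeof(v[0]), int_cmp);
        printf("%d %d %d\n", v[0], v[1], v[2]);  /* -1 2 3 */
        return 0;
}
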
|
D | cred.c |
    632  int cred_fscmp(const struct cred *a, const struct cred *b)  in cred_fscmp() argument
    637          if (a == b)  in cred_fscmp()
    639          if (uid_lt(a->fsuid, b->fsuid))  in cred_fscmp()
    641          if (uid_gt(a->fsuid, b->fsuid))  in cred_fscmp()
    644          if (gid_lt(a->fsgid, b->fsgid))  in cred_fscmp()
    646          if (gid_gt(a->fsgid, b->fsgid))  in cred_fscmp()
    649          ga = a->group_info;  in cred_fscmp()
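
cred_fscmp() is a lexicographic cascade: an identity check first, then each key compared in order of significance, returning on the first field that differs (the elided tail walks the two supplementary group lists the same way). The shape, with illustrative stand-in types:

#include <stdio.h>

struct fs_ident { unsigned int uid; unsigned int gid; };

/* Compare keys by significance; return on the first difference. */
static int fs_ident_cmp(const struct fs_ident *a, const struct fs_ident *b)
{
        if (a == b)
                return 0;
        if (a->uid < b->uid)
                return -1;
        if (a->uid > b->uid)
                return 1;
        if (a->gid < b->gid)
                return -1;
        if (a->gid > b->gid)
                return 1;
        return 0;
}

int main(void)
{
        struct fs_ident a = { 1000, 1000 }, b = { 1000, 2000 };

        printf("%d\n", fs_ident_cmp(&a, &b));   /* -1: same uid, smaller gid */
        return 0;
}
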
|
/kernel/power/ |
D | Kconfig |
     30    user-space before invoking suspend. There's a run-time switch
     71    It also works with swap files to a limited extent (for details see
     79    will get corrupted in a nasty way.
     81    For more information take a look at <file:Documentation/power/swsusp.rst>.
    101    to-disk implementation will look for a suspended disk image.
    104    It should be a valid swap partition (at least for now) that is turned
    113    Note there is currently not a way to specify which device to save the
    135    If an arch can suspend (for suspend, hibernate, kexec, etc) on a
    143    Allow the kernel to trigger a system transition into a global sleep
    151    objects with the help of a sysfs-based interface.
    [all …]
|
/kernel/rcu/ |
D | Kconfig.debug |
     18    need to be converted to pass a lockdep expression. To prevent
     36    This option provides a kernel module that runs performance
     42    Say M if you want the RCU performance tests to build as a module.
     55    This option provides a kernel module that runs torture tests
     61    Say M if you want the RCU torture tests to build as a module.
     74    This option provides a kernel module that runs performance tests
     80    Say M if you want to build it as a module instead.
     89    If a given RCU grace period extends more than the specified
     90    number of seconds, a CPU stall warning is printed. If the
|
D | Kconfig |
     83    This option enables a task-based RCU implementation that uses
     91    This option enables a task-based RCU implementation that uses
    101    This option enables a task-based RCU implementation that uses
    135    Select a specific number if testing RCU itself.
    160    fanout to a large number will likely cause problematic
    164    Select a specific number if testing RCU itself.
    211    a given grace period before priority-boosting preempted RCU
    218    bool "Perform RCU expedited work in a real-time kthread"
    247    such CPU, a kthread ("rcuox/N") will be created to invoke
    266    RCU grace periods. Given that a reasonable setting of
    [all …]
|
/kernel/time/ |
D | timeconst.bc |
      5  define gcd(a,b) {
      9          b = a % b;
     10          a = t;
     12          return a;
     20  /* Adjustment factor when a ceiling value is used. Use as:
     29  /* Compute the appropriate mul/adj values as well as a shift count,
     31     a shift value will be correct in the signed integer range and off
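
timeconst.bc runs at kernel build time and emits multiply/add/shift constants so that conversions such as milliseconds to jiffies need no runtime division: x / d becomes roughly (x * mul + adj) >> shift, exact over the intended input range, with the HZ/1000 fraction first reduced by its gcd. A C rendering of the script's gcd() (Euclid's algorithm; the constant derivation itself is not reproduced here):

#include <stdio.h>

/* Euclid's algorithm, as the bc script defines it. */
static unsigned long gcd(unsigned long a, unsigned long b)
{
        while (b) {
                unsigned long t = b;

                b = a % b;
                a = t;
        }
        return a;
}

int main(void)
{
        /* For HZ=250 the script reduces 250/1000 to 1/4 before deriving
         * the per-direction mul/adj/shift constants. */
        printf("gcd(250, 1000) = %lu\n", gcd(250, 1000));
        return 0;
}
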
|
D | Kconfig |
     33  # Architecture can handle broadcast in a driver-agnostic way
     96    This option keeps the tick running periodically at a constant
    103    This option enables a tickless idle system: timer interrupts
    124    the CPU is running tasks. Typically this requires running a single
    136    If you're a distro say Y.
    173    We keep it around for a little while to enforce backward
|
/kernel/dma/ |
D | Kconfig |
     87    This enables support for restricted DMA pools which provide a level of
     98  # The only thing that is really required is a way to set an uncached bit
    169    Allocator as a percentage of the total memory in the system.
    198    size. This works well for buffers up to a few hundreds kilobytes, but
    199    for larger buffers it just a memory waste. With this parameter you can
    202    expressed as a power of two multiplied by the PAGE_SIZE.
    220    This option causes a performance degradation. Use only if you want to
    236    preparing literal scatter-gather descriptors, where there is a risk of
|
/kernel/locking/ |
D | ww_mutex.h |
    228  __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)  in __ww_ctx_less() argument
    237          int a_prio = a->task->prio;  in __ww_ctx_less()
    252                                 a->task->dl.deadline))  in __ww_ctx_less()
    255          if (dl_time_before(a->task->dl.deadline,  in __ww_ctx_less()
    265          return (signed long)(a->stamp - b->stamp) > 0;  in __ww_ctx_less()
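
Line 265 is the tie-breaker when neither deadline nor priority ordering decides: wound/wait contexts are ordered by acquisition stamp, and doing the subtraction in unsigned arithmetic before casting the difference signed keeps the ordering correct across counter wraparound, as long as the two stamps are within half the counter range of each other. A standalone sketch of the idiom:

#include <stdbool.h>
#include <stdio.h>

/* True if stamp a was taken after stamp b, even across wraparound. */
static bool stamp_after(unsigned long a, unsigned long b)
{
        return (long)(a - b) > 0;
}

int main(void)
{
        unsigned long before_wrap = (unsigned long)-2;
        unsigned long after_wrap = 1;

        /* A plain '>' misorders stamps that straddle the wrap point;
         * the signed-difference test does not. */
        printf("%d %d\n", after_wrap > before_wrap,
               stamp_after(after_wrap, before_wrap));   /* prints: 0 1 */
        return 0;
}
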
|
/kernel/gcov/ |
D | Kconfig |
     19    directories, add a line similar to the following to the respective
     22    For a single file (e.g. main.o):
     52    Note that a kernel compiled with profiling flags will be significantly
|
/kernel/bpf/preload/ |
D | Makefile |
      4  LIBBPF_A = $(obj)/libbpf.a
     11          $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
|
/kernel/livepatch/ |
D | Kconfig |
     17    This option has no runtime impact until a kernel "patch"
     19    a patch, causing calls to patched functions to be redirected
|
/kernel/kcsan/ |
D | debugfs.c |
     97          const unsigned long a = *(const unsigned long *)rhs;  in cmp_filterlist_addrs() local
    100          return a < b ? -1 : a == b ? 0 : 1;  in cmp_filterlist_addrs()
|