
Searched refs:n (Results 1 – 25 of 94) sorted by relevance


/kernel/time/
timeconst.bc
16 define fmul(b,n,d) {
17 return (2^b*n+d-1)/d;
21 (imul * n) + (fmulxx * n + fadjxx) >> xx) */
22 define fadj(b,n,d) {
24 d = d/gcd(n,d);
33 define fmuls(b,n,d) {
36 m = fmul(s,n,d);
44 print "/* Automatically generated by kernel/time/timeconst.bc */\n"
45 print "/* Time conversion constants for HZ == ", hz, " */\n"
46 print "\n"
[all …]
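
timeconst.bc precomputes reciprocal-multiply constants so the kernel can convert time units without a runtime division: fmul(b,n,d) rounds 2^b*n/d up, and the conversion then becomes (x * mul) >> b. A minimal userspace sketch of the same arithmetic; b = 32 and HZ = 250 are illustrative assumptions, and __uint128_t is a GCC/Clang extension:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* fmul(b,n,d) = (2^b*n + d-1)/d, as in timeconst.bc */
	const uint64_t hz = 250;                 /* assumed HZ */
	const uint64_t mul = ((UINT64_C(1) << 32) * 1000 + hz - 1) / hz;

	/* jiffies -> milliseconds with a multiply and a shift only */
	uint64_t jiffies = 12345;
	uint64_t msec = (uint64_t)(((__uint128_t)jiffies * mul) >> 32);

	printf("mul = %llu, %llu jiffies ~= %llu ms\n",
	       (unsigned long long)mul,
	       (unsigned long long)jiffies,
	       (unsigned long long)msec);
	return 0;
}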
/kernel/locking/
test-ww_mutex.c
278 unsigned int n, last = nthreads - 1; in __test_cycle() local
285 for (n = 0; n < nthreads; n++) { in __test_cycle()
286 struct test_cycle *cycle = &cycles[n]; in __test_cycle()
289 if (n == last) in __test_cycle()
292 cycle->b_mutex = &cycles[n + 1].a_mutex; in __test_cycle()
294 if (n == 0) in __test_cycle()
297 cycle->a_signal = &cycles[n - 1].b_signal; in __test_cycle()
304 for (n = 0; n < nthreads; n++) in __test_cycle()
305 queue_work(wq, &cycles[n].work); in __test_cycle()
310 for (n = 0; n < nthreads; n++) { in __test_cycle()
[all …]
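
__test_cycle() wires nthreads test_cycle structs into a ring: each worker locks its own a_mutex and then b_mutex, which aliases the next worker's a_mutex (the last entry wraps back to the first), so the workers jointly form one circular wait for the ww_mutex algorithm to break. A stand-in sketch of just the ring wiring, with plain ints in place of struct ww_mutex:

#include <stdio.h>

#define NTHREADS 4

struct test_cycle {
	int a_mutex;        /* stand-in for struct ww_mutex */
	int *b_mutex;       /* aliases the next entry's a_mutex */
};

int main(void)
{
	struct test_cycle cycles[NTHREADS];
	unsigned int n, last = NTHREADS - 1;

	for (n = 0; n < NTHREADS; n++) {
		if (n == last)
			cycles[n].b_mutex = &cycles[0].a_mutex;  /* close the ring */
		else
			cycles[n].b_mutex = &cycles[n + 1].a_mutex;
	}

	for (n = 0; n < NTHREADS; n++)
		printf("worker %u waits on worker %u's a_mutex\n",
		       n, (n == last) ? 0 : n + 1);
	return 0;
}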
/kernel/
auditsc.c
164 unsigned n; in audit_match_perm() local
167 n = ctx->major; in audit_match_perm()
169 switch (audit_classify_syscall(ctx->arch, n)) { in audit_match_perm()
172 audit_match_class(AUDIT_CLASS_WRITE, n)) in audit_match_perm()
175 audit_match_class(AUDIT_CLASS_READ, n)) in audit_match_perm()
178 audit_match_class(AUDIT_CLASS_CHATTR, n)) in audit_match_perm()
183 audit_match_class(AUDIT_CLASS_WRITE_32, n)) in audit_match_perm()
186 audit_match_class(AUDIT_CLASS_READ_32, n)) in audit_match_perm()
189 audit_match_class(AUDIT_CLASS_CHATTR_32, n)) in audit_match_perm()
207 struct audit_names *n; in audit_match_filetype() local
[all …]
notifier.c
23 struct notifier_block *n) in notifier_chain_register() argument
26 if (unlikely((*nl) == n)) { in notifier_chain_register()
30 if (n->priority > (*nl)->priority) in notifier_chain_register()
34 n->next = *nl; in notifier_chain_register()
35 rcu_assign_pointer(*nl, n); in notifier_chain_register()
40 struct notifier_block *n) in notifier_chain_unregister() argument
43 if ((*nl) == n) { in notifier_chain_unregister()
44 rcu_assign_pointer(*nl, n->next); in notifier_chain_unregister()
140 struct notifier_block *n) in atomic_notifier_chain_register() argument
146 ret = notifier_chain_register(&nh->head, n); in atomic_notifier_chain_register()
[all …]
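
notifier_chain_register() keeps the chain sorted by descending priority using a pointer-to-pointer walk, so insertion needs no special head-of-list case. A userspace sketch of the same insert, with RCU and locking dropped (rcu_assign_pointer() becomes a plain store):

#include <stdio.h>

/* minimal stand-in for struct notifier_block */
struct nb {
	int priority;
	struct nb *next;
};

static void chain_register(struct nb **nl, struct nb *n)
{
	while (*nl) {
		if (*nl == n)
			return;                   /* already registered */
		if (n->priority > (*nl)->priority)
			break;                    /* found the insertion slot */
		nl = &(*nl)->next;
	}
	n->next = *nl;
	*nl = n;
}

int main(void)
{
	struct nb a = { .priority = 1 }, b = { .priority = 10 }, c = { .priority = 5 };
	struct nb *head = NULL;

	chain_register(&head, &a);
	chain_register(&head, &b);
	chain_register(&head, &c);
	for (struct nb *p = head; p; p = p->next)
		printf("prio %d\n", p->priority); /* 10, 5, 1 */
	return 0;
}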
watch_queue.c
99 struct watch_notification *n) in post_one_notification() argument
126 len = n->info & WATCH_INFO_LENGTH; in post_one_notification()
128 memcpy(p + offset, n, len); in post_one_notification()
163 const struct watch_notification *n) in filter_watch_notification() argument
167 unsigned int st_index = n->subtype / st_bits; in filter_watch_notification()
168 unsigned int st_bit = 1U << (n->subtype % st_bits); in filter_watch_notification()
171 if (!test_bit(n->type, wf->type_filter)) in filter_watch_notification()
176 if (n->type == wt->type && in filter_watch_notification()
178 (n->info & wt->info_mask) == wt->info_filter) in filter_watch_notification()
199 struct watch_notification *n, in __post_watch_notification() argument
[all …]
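
filter_watch_notification() tests the notification subtype against a per-watch-type bitmap: st_index selects the word, st_bit the bit within it. A sketch of that bitmap test; the struct and field names here are stand-ins for the kernel's watch_type_filter:

#include <stdio.h>

#define ST_BITS (8 * sizeof(unsigned long))

/* stand-in for the kernel's per-type subtype bitmap */
struct subtype_filter {
	unsigned long bits[4];          /* subtypes 0 .. 4*ST_BITS-1 */
};

static int subtype_allowed(const struct subtype_filter *f, unsigned int subtype)
{
	unsigned int st_index = subtype / ST_BITS;
	unsigned long st_bit = 1UL << (subtype % ST_BITS);

	return (f->bits[st_index] & st_bit) != 0;
}

int main(void)
{
	struct subtype_filter f = { { 0 } };

	f.bits[70 / ST_BITS] |= 1UL << (70 % ST_BITS);   /* allow subtype 70 */
	printf("70: %d, 71: %d\n",
	       subtype_allowed(&f, 70), subtype_allowed(&f, 71));
	return 0;
}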
Makefile
26 KCOV_INSTRUMENT_softirq.o := n
29 KCSAN_SANITIZE_softirq.o = n
32 KCOV_INSTRUMENT_module.o := n
33 KCOV_INSTRUMENT_extable.o := n
34 KCOV_INSTRUMENT_stacktrace.o := n
36 KCOV_INSTRUMENT_kcov.o := n
37 KASAN_SANITIZE_kcov.o := n
38 KCSAN_SANITIZE_kcov.o := n
133 KASAN_SANITIZE_stackleak.o := n
134 KCSAN_SANITIZE_stackleak.o := n
[all …]
Kconfig.locks
96 # - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
99 # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
102 # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
104 # - DEBUG_SPINLOCK=n and PREEMPTION=n
107 # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
profile.c
166 int task_handoff_register(struct notifier_block *n) in task_handoff_register() argument
168 return atomic_notifier_chain_register(&task_free_notifier, n); in task_handoff_register()
172 int task_handoff_unregister(struct notifier_block *n) in task_handoff_unregister() argument
174 return atomic_notifier_chain_unregister(&task_free_notifier, n); in task_handoff_unregister()
178 int profile_event_register(enum profile_type type, struct notifier_block *n) in profile_event_register() argument
185 &task_exit_notifier, n); in profile_event_register()
189 &munmap_notifier, n); in profile_event_register()
197 int profile_event_unregister(enum profile_type type, struct notifier_block *n) in profile_event_unregister() argument
204 &task_exit_notifier, n); in profile_event_unregister()
208 &munmap_notifier, n); in profile_event_unregister()
audit_tree.c
221 unsigned long n = key / L1_CACHE_BYTES; in chunk_hash() local
222 return chunk_hash_heads + n % HASH_SIZE; in chunk_hash()
263 int n; in audit_tree_match() local
264 for (n = 0; n < chunk->count; n++) in audit_tree_match()
265 if (chunk->owners[n].owner == tree) in audit_tree_match()
463 int n; in tag_chunk() local
478 for (n = 0; n < old->count; n++) { in tag_chunk()
479 if (old->owners[n].owner == tree) { in tag_chunk()
1009 int n; in evict_chunk() local
1030 for (n = 0; n < chunk->count; n++) in evict_chunk()
[all …]
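
chunk_hash() divides the key by L1_CACHE_BYTES before taking the modulus: the hashed keys are addresses of cache-line-aligned objects, so their low bits are always zero and would otherwise waste hash buckets. A sketch; the L1_CACHE_BYTES and HASH_SIZE values below are assumptions, both being config-dependent in the kernel:

#include <stdio.h>
#include <stdint.h>

#define L1_CACHE_BYTES 64    /* assumption; arch-dependent in the kernel */
#define HASH_SIZE      128

static unsigned long chunk_hash(uintptr_t key)
{
	unsigned long n = key / L1_CACHE_BYTES;  /* drop always-zero low bits */

	return n % HASH_SIZE;
}

int main(void)
{
	/* adjacent cache-line-aligned addresses land in different buckets */
	printf("%lu %lu\n", chunk_hash(0x1000), chunk_hash(0x1040));
	return 0;
}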
user.c
216 int n; in uid_cache_init() local
221 for(n = 0; n < UIDHASH_SZ; ++n) in uid_cache_init()
222 INIT_HLIST_HEAD(uidhash_table + n); in uid_cache_init()
/kernel/power/
wakeup_reason.c
100 struct wakeup_irq_node *n; in delete_list() local
103 n = list_first_entry(head, struct wakeup_irq_node, siblings); in delete_list()
104 list_del(&n->siblings); in delete_list()
105 kmem_cache_free(wakeup_irq_nodes_cache, n); in delete_list()
111 struct wakeup_irq_node *n = NULL; in add_sibling_node_sorted() local
118 list_for_each_entry(n, head, siblings) { in add_sibling_node_sorted()
119 if (n->irq < irq) in add_sibling_node_sorted()
120 predecessor = &n->siblings; in add_sibling_node_sorted()
121 else if (n->irq == irq) in add_sibling_node_sorted()
127 n = create_node(irq); in add_sibling_node_sorted()
[all …]
main.c
107 const char *buf, size_t n) in pm_async_store() argument
118 return n; in pm_async_store()
147 static suspend_state_t decode_suspend_state(const char *buf, size_t n) in decode_suspend_state() argument
153 p = memchr(buf, '\n', n); in decode_suspend_state()
154 len = p ? p - buf : n; in decode_suspend_state()
167 const char *buf, size_t n) in mem_sleep_store() argument
181 state = decode_suspend_state(buf, n); in mem_sleep_store()
189 return error ? error : n; in mem_sleep_store()
210 const char *buf, size_t n) in sync_on_suspend_store() argument
221 return n; in sync_on_suspend_store()
[all …]
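
decode_suspend_state() shows the standard sysfs store idiom: the incoming buffer is not NUL-terminated, so the length is trimmed at the first newline with memchr() before string matching. A userspace sketch of the trimming step:

#include <stdio.h>
#include <string.h>

/* trim a non-NUL-terminated store buffer at the first newline, if any */
static size_t store_len(const char *buf, size_t n)
{
	const char *p = memchr(buf, '\n', n);

	return p ? (size_t)(p - buf) : n;
}

int main(void)
{
	const char buf[] = "mem\n";              /* as echo(1) would write it */
	size_t len = store_len(buf, sizeof(buf) - 1);

	if (len == strlen("mem") && !strncmp(buf, "mem", len))
		printf("suspend state: mem\n");
	return 0;
}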
hibernate.c
1095 const char *buf, size_t n) in disk_store() argument
1106 p = memchr(buf, '\n', n); in disk_store()
1107 len = p ? p - buf : n; in disk_store()
1140 return error ? error : n; in disk_store()
1153 const char *buf, size_t n) in resume_store() argument
1156 int len = n; in resume_store()
1177 return n; in resume_store()
1190 size_t n) in resume_offset_store() argument
1200 return n; in resume_offset_store()
1212 const char *buf, size_t n) in image_size_store() argument
[all …]
/kernel/irq/
affinity.c
86 int n, nodes = 0; in get_nodes_in_cpumask() local
89 for_each_node(n) { in get_nodes_in_cpumask()
90 if (cpumask_intersects(mask, node_to_cpumask[n])) { in get_nodes_in_cpumask()
91 node_set(n, *nodemsk); in get_nodes_in_cpumask()
135 unsigned n, remaining_ncpus = 0; in alloc_nodes_vectors() local
137 for (n = 0; n < nr_node_ids; n++) { in alloc_nodes_vectors()
138 node_vectors[n].id = n; in alloc_nodes_vectors()
139 node_vectors[n].ncpus = UINT_MAX; in alloc_nodes_vectors()
142 for_each_node_mask(n, nodemsk) { in alloc_nodes_vectors()
145 cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); in alloc_nodes_vectors()
[all …]
/kernel/configs/
nopm.config
1 CONFIG_PM=n
2 CONFIG_SUSPEND=n
3 CONFIG_HIBERNATION=n
6 CONFIG_CPU_IDLE=n
9 CONFIG_XEN=n
12 CONFIG_ARCH_OMAP2PLUS_TYPICAL=n
13 CONFIG_ARCH_RENESAS=n
14 CONFIG_ARCH_TEGRA=n
15 CONFIG_ARCH_VEXPRESS=n
/kernel/sched/
loadavg.c
110 fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) in fixed_power_int() argument
114 if (n) { in fixed_power_int()
116 if (n & 1) { in fixed_power_int()
121 n >>= 1; in fixed_power_int()
122 if (!n) in fixed_power_int()
158 unsigned long active, unsigned int n) in calc_load_n() argument
160 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); in calc_load_n()
307 long delta, active, n; in calc_global_nohz() local
315 n = 1 + (delta / LOAD_FREQ); in calc_global_nohz()
320 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n); in calc_global_nohz()
[all …]
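
fixed_power_int() computes x^n in fixed point by square-and-multiply, rounding after each step, so calc_load_n() can catch up on n missed load-average ticks in O(log n). A self-contained sketch following the loadavg.c code; FSHIFT and EXP_1 are the kernel's constants:

#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1UL << FSHIFT)
#define EXP_1   1884   /* 1/exp(5sec/1min) in fixed point */

/* x^n in frac_bits fixed point, rounding each intermediate result */
static unsigned long fixed_power_int(unsigned long x, unsigned int frac_bits,
				     unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	if (n) {
		for (;;) {
			if (n & 1) {              /* multiply in this factor */
				result *= x;
				result += 1UL << (frac_bits - 1);
				result >>= frac_bits;
			}
			n >>= 1;
			if (!n)
				break;
			x *= x;                   /* square for the next bit */
			x += 1UL << (frac_bits - 1);
			x >>= frac_bits;
		}
	}
	return result;
}

int main(void)
{
	/* decay factor after n = 12 missed 5-second ticks (one minute) */
	printf("EXP_1^12 = %lu/%lu\n",
	       fixed_power_int(EXP_1, FSHIFT, 12), FIXED_1);
	return 0;
}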
stats.c
85 unsigned long n = *offset; in schedstat_start() local
87 if (n == 0) in schedstat_start()
90 n--; in schedstat_start()
92 if (n > 0) in schedstat_start()
93 n = cpumask_next(n - 1, cpu_online_mask); in schedstat_start()
95 n = cpumask_first(cpu_online_mask); in schedstat_start()
97 *offset = n + 1; in schedstat_start()
99 if (n < nr_cpu_ids) in schedstat_start()
100 return (void *)(unsigned long)(n + 2); in schedstat_start()
debug.c
207 static struct ctl_table *sd_alloc_ctl_entry(int n) in sd_alloc_ctl_entry() argument
210 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); in sd_alloc_ctl_entry()
724 #define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n); in print_cpu() argument
730 #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n)); in print_cpu() argument
844 unsigned long n = *offset; in sched_debug_start() local
846 if (n == 0) in sched_debug_start()
849 n--; in sched_debug_start()
851 if (n > 0) in sched_debug_start()
852 n = cpumask_next(n - 1, cpu_online_mask); in sched_debug_start()
854 n = cpumask_first(cpu_online_mask); in sched_debug_start()
[all …]
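
The P(n)/P64(n) macros in print_cpu() use the preprocessor twice: #n stringifies the field name for the label while rq->n accesses the field itself, so one macro emits aligned name/value lines. A minimal userspace sketch of the idiom with a hypothetical stats struct:

#include <stdio.h>

struct rq_stats {                /* hypothetical stats struct */
	int nr_running;
	long clock;
};

/* #n stringifies the member name; s->n pastes it as an expression */
#define P(n) printf(" .%-30s: %ld\n", #n, (long)(s->n))

int main(void)
{
	struct rq_stats stats = { .nr_running = 3, .clock = 123456 };
	const struct rq_stats *s = &stats;

	P(nr_running);
	P(clock);
	return 0;
}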
/kernel/bpf/preload/
bpf_preload_kern.c
28 ssize_t n; in preload() local
35 n = kernel_write(umd_ops.info.pipe_to_umh, in preload()
37 if (n != sizeof(magic)) in preload()
43 n = kernel_read(umd_ops.info.pipe_from_umh, in preload()
45 if (n != sizeof(*obj)) in preload()
56 ssize_t n; in finish() local
59 n = kernel_write(umd_ops.info.pipe_to_umh, in finish()
61 if (n != sizeof(magic)) in finish()
/kernel/bpf/
tnum.c
180 size_t n; in tnum_sbin() local
182 for (n = 64; n; n--) { in tnum_sbin()
183 if (n < size) { in tnum_sbin()
185 str[n - 1] = 'x'; in tnum_sbin()
187 str[n - 1] = '1'; in tnum_sbin()
189 str[n - 1] = '0'; in tnum_sbin()
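
tnum_sbin() renders a tristate number most-significant-bit first: a bit set in mask is unknown and prints as 'x'; otherwise the bit comes from value and prints as '1' or '0'. A self-contained sketch following the tnum.c code:

#include <stdio.h>
#include <stdint.h>

/* a tnum tracks known bits in value and unknown bits in mask */
struct tnum {
	uint64_t value;
	uint64_t mask;
};

static void tnum_sbin(char *str, size_t size, struct tnum a)
{
	size_t n, end = (size - 1 < 64) ? size - 1 : 64;

	for (n = 64; n; n--) {
		if (n < size) {                   /* bit 64-n goes to str[n-1] */
			if (a.mask & 1)
				str[n - 1] = 'x';
			else if (a.value & 1)
				str[n - 1] = '1';
			else
				str[n - 1] = '0';
		}
		a.mask >>= 1;
		a.value >>= 1;
	}
	str[end] = '\0';
}

int main(void)
{
	char str[65];
	struct tnum a = { .value = 0x05, .mask = 0xf0 }; /* bits 4..7 unknown */

	tnum_sbin(str, sizeof(str), a);
	printf("%s\n", str);   /* 56 zeros, then xxxx0101 */
	return 0;
}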
bpf_iter.c
84 size_t n, offs, copied = 0; in bpf_seq_read() local
100 n = min(seq->count, size); in bpf_seq_read()
101 err = copy_to_user(buf, seq->buf + seq->from, n); in bpf_seq_read()
106 seq->count -= n; in bpf_seq_read()
107 seq->from += n; in bpf_seq_read()
108 copied = n; in bpf_seq_read()
200 n = min(seq->count, size); in bpf_seq_read()
201 err = copy_to_user(buf, seq->buf, n); in bpf_seq_read()
206 copied = n; in bpf_seq_read()
207 seq->count -= n; in bpf_seq_read()
[all …]
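
bpf_seq_read() hands out at most size bytes per call and remembers its position with seq->from/seq->count, so a large record survives repeated short reads. A userspace sketch of that bookkeeping, with memcpy() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

/* stand-in for the seq buffer: count bytes remain, from is the cursor */
struct seq_buf {
	const char *buf;
	size_t count;
	size_t from;
};

static size_t seq_read(struct seq_buf *seq, char *out, size_t size)
{
	size_t n = seq->count < size ? seq->count : size;

	memcpy(out, seq->buf + seq->from, n);    /* copy_to_user() in-kernel */
	seq->count -= n;
	seq->from += n;
	return n;
}

int main(void)
{
	struct seq_buf seq = { "hello world", 11, 0 };
	char out[4];
	size_t n;

	while ((n = seq_read(&seq, out, sizeof(out))))
		printf("%.*s|", (int)n, out);     /* hell|o wo|rld| */
	printf("\n");
	return 0;
}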
/kernel/events/
uprobes.c
636 struct rb_node *n = uprobes_tree.rb_node; in __find_uprobe() local
640 while (n) { in __find_uprobe()
641 uprobe = rb_entry(n, struct uprobe, rb_node); in __find_uprobe()
647 n = n->rb_left; in __find_uprobe()
649 n = n->rb_right; in __find_uprobe()
1268 struct rb_node *n = uprobes_tree.rb_node; in find_node_in_range() local
1270 while (n) { in find_node_in_range()
1271 struct uprobe *u = rb_entry(n, struct uprobe, rb_node); in find_node_in_range()
1274 n = n->rb_left; in find_node_in_range()
1276 n = n->rb_right; in find_node_in_range()
[all …]
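
__find_uprobe() is a plain binary-search-tree descent over the rbtree, comparing the (inode, offset) key and following rb_left/rb_right; rb_entry() is container_of() recovering the uprobe from its embedded rb_node. A sketch of the same descent over a simple BST (no rebalancing, scalar key):

#include <stdio.h>
#include <stddef.h>

struct node {
	long key;                     /* stands in for (inode, offset) */
	struct node *left, *right;
};

static struct node *find(struct node *n, long key)
{
	while (n) {
		if (key < n->key)
			n = n->left;
		else if (key > n->key)
			n = n->right;
		else
			return n;     /* exact match */
	}
	return NULL;
}

int main(void)
{
	struct node a = { 10, NULL, NULL }, c = { 30, NULL, NULL };
	struct node root = { 20, &a, &c };

	printf("found %ld\n", find(&root, 30)->key);
	return 0;
}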
/kernel/gcov/
Kconfig
8 default n
29 GCOV_PROFILE_main.o := n
31 GCOV_PROFILE := n
37 def_bool n
44 default n
/kernel/entry/
Makefile
5 KASAN_SANITIZE := n
6 UBSAN_SANITIZE := n
7 KCOV_INSTRUMENT := n
/kernel/kcsan/
Makefile
2 KCSAN_SANITIZE := n
3 KCOV_INSTRUMENT := n
4 UBSAN_SANITIZE := n
