
Searched refs:i (Results 1 – 25 of 199) sorted by relevance


/kernel/rcu/
refscale.c
144 int i; in ref_rcu_read_section() local
146 for (i = nloops; i >= 0; i--) { in ref_rcu_read_section()
154 int i; in ref_rcu_delay_section() local
156 for (i = nloops; i >= 0; i--) { in ref_rcu_delay_section()
180 int i; in srcu_ref_scale_read_section() local
183 for (i = nloops; i >= 0; i--) { in srcu_ref_scale_read_section()
191 int i; in srcu_ref_scale_delay_section() local
194 for (i = nloops; i >= 0; i--) { in srcu_ref_scale_delay_section()
212 int i; in rcu_tasks_ref_scale_read_section() local
214 for (i = nloops; i >= 0; i--) in rcu_tasks_ref_scale_read_section()
[all …]
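Each *_read_section() above runs its read-side primitive in the same countdown loop so that one pair of timestamps covers nloops + 1 iterations, amortizing the timer overhead. A minimal userspace sketch of that measurement shape, assuming a GCC-style compiler (for the empty asm barrier) and clock_gettime() in place of kernel timekeeping:

```c
#include <stdio.h>
#include <time.h>

static void read_section(long nloops)
{
	for (long i = nloops; i >= 0; i--) {
		/* the read-side critical section would go here */
		asm volatile("" ::: "memory");	/* keep the loop alive */
	}
}

int main(void)
{
	struct timespec t0, t1;
	long nloops = 10 * 1000 * 1000;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	read_section(nloops);
	clock_gettime(CLOCK_MONOTONIC, &t1);

	double ns = (t1.tv_sec - t0.tv_sec) * 1e9 + (t1.tv_nsec - t0.tv_nsec);
	printf("%.2f ns/iter\n", ns / (nloops + 1));	/* per-iteration cost */
	return 0;
}
```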
rcu_segcblist.c
101 int i; in rcu_segcblist_n_segment_cbs() local
103 for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) in rcu_segcblist_n_segment_cbs()
104 len += rcu_segcblist_get_seglen(rsclp, i); in rcu_segcblist_n_segment_cbs()
239 int i; in rcu_segcblist_init() local
244 for (i = 0; i < RCU_CBLIST_NSEGS; i++) { in rcu_segcblist_init()
245 rsclp->tails[i] = &rsclp->head; in rcu_segcblist_init()
246 rcu_segcblist_set_seglen(rsclp, i, 0); in rcu_segcblist_init()
365 int i; in rcu_segcblist_entrain() local
372 for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--) in rcu_segcblist_entrain()
373 if (rsclp->tails[i] != rsclp->tails[i - 1]) in rcu_segcblist_entrain()
[all …]
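rcu_segcblist_init() points every tails[] entry back at the list head (an empty segment ends where the list begins) and zeroes the per-segment lengths, so rcu_segcblist_n_segment_cbs() can count callbacks by summing seglen across the segment indices. A single-threaded sketch of that layout; the segment indices echo the kernel's RCU_*_TAIL names, everything else is illustrative:

```c
#include <stddef.h>
#include <stdio.h>

enum { SEG_DONE, SEG_WAIT, SEG_NEXT_READY, SEG_NEXT, NSEGS };

struct cb {
	struct cb *next;
};

struct seglist {
	struct cb *head;
	struct cb **tails[NSEGS];	/* tails[i]: end of segment i */
	long seglen[NSEGS];
};

static void seglist_init(struct seglist *sl)
{
	sl->head = NULL;
	for (int i = 0; i < NSEGS; i++) {	/* empty segments all end at head */
		sl->tails[i] = &sl->head;
		sl->seglen[i] = 0;
	}
}

static long seglist_n_cbs(const struct seglist *sl)
{
	long len = 0;

	for (int i = SEG_DONE; i < NSEGS; i++)
		len += sl->seglen[i];
	return len;
}

int main(void)
{
	struct seglist sl;

	seglist_init(&sl);
	printf("callbacks: %ld\n", seglist_n_cbs(&sl));	/* 0 */
	return 0;
}
```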
rcuscale.c
390 int i = 0; in rcu_scale_writer() local
429 wdp = &wdpp[i]; in rcu_scale_writer()
452 i_max = i; in rcu_scale_writer()
456 if (!done && i >= MIN_MEAS) { in rcu_scale_writer()
483 if (started && !alldone && i < MAX_MEAS - 1) in rcu_scale_writer()
484 i++; in rcu_scale_writer()
546 int i, loop = 0; in kfree_scale_thread() local
575 for (i = 0; i < kfree_alloc_num; i++) { in kfree_scale_thread()
620 int i; in kfree_scale_cleanup() local
626 for (i = 0; i < kfree_nrealthreads; i++) in kfree_scale_cleanup()
[all …]
rcutorture.c
244 unsigned int i = READ_ONCE(rcu_torture_writer_state); in rcu_torture_writer_state_getname() local
246 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) in rcu_torture_writer_state_getname()
248 return rcu_torture_writer_state_names[i]; in rcu_torture_writer_state_getname()
419 int i; in rcu_torture_pipe_update_one() local
426 i = READ_ONCE(rp->rtort_pipe_count); in rcu_torture_pipe_update_one()
427 if (i > RCU_TORTURE_PIPE_LEN) in rcu_torture_pipe_update_one()
428 i = RCU_TORTURE_PIPE_LEN; in rcu_torture_pipe_update_one()
429 atomic_inc(&rcu_torture_wcount[i]); in rcu_torture_pipe_update_one()
430 WRITE_ONCE(rp->rtort_pipe_count, i + 1); in rcu_torture_pipe_update_one()
1154 int i; in rcu_torture_writer() local
[all …]
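rcu_torture_pipe_update_one() reads the per-item pipeline counter, clamps it into the last histogram bucket, bumps that bucket, and advances the counter. A standalone sketch of the same clamped-histogram update; READ_ONCE()/WRITE_ONCE() are approximated here with C11 relaxed atomics, and all names are illustrative:

```c
#include <stdatomic.h>
#include <stdio.h>

#define PIPE_LEN 10

static _Atomic long wcount[PIPE_LEN + 1];	/* histogram of pipeline stages */

static void pipe_update_one(_Atomic int *pipe_count)
{
	int i = atomic_load_explicit(pipe_count, memory_order_relaxed);

	if (i > PIPE_LEN)
		i = PIPE_LEN;	/* clamp overflow into the last bucket */
	atomic_fetch_add_explicit(&wcount[i], 1, memory_order_relaxed);
	atomic_store_explicit(pipe_count, i + 1, memory_order_relaxed);
}

int main(void)
{
	_Atomic int count = 0;

	for (int n = 0; n < 15; n++)
		pipe_update_one(&count);
	/* counts 10..14 all land in the last bucket: prints 5 */
	printf("last bucket: %ld\n", (long)atomic_load(&wcount[PIPE_LEN]));
	return 0;
}
```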
/kernel/irq/
timings.c
281 #define for_each_irqts(i, irqts) \ argument
282 for (i = irqts->count < IRQ_TIMINGS_SIZE ? \
287 i = (i + 1) & IRQ_TIMINGS_MASK)
384 int index, i, period_max, count, start, min = INT_MAX; in __irq_timings_next_event() local
421 for (i = 0; i < count; i++) { in __irq_timings_next_event()
422 int index = (start + i) & IRQ_TIMINGS_MASK; in __irq_timings_next_event()
424 irqs->timings[i] = irqs->circ_timings[index]; in __irq_timings_next_event()
425 min = min_t(int, irqs->timings[i], min); in __irq_timings_next_event()
542 int i, irq = 0; in irq_timings_next_event() local
568 for_each_irqts(i, irqts) { in irq_timings_next_event()
[all …]
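for_each_irqts() iterates a power-of-two circular buffer oldest-entry-first: start at index 0 until the buffer first wraps, afterwards at the slot just past the newest entry, and mask the index on each step. A userspace sketch of the same idiom; it keeps an explicit visit counter n where the kernel macro folds everything into i, and all names are made up:

```c
#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	unsigned int count;		/* total pushes; may exceed RING_SIZE */
	int slots[RING_SIZE];
};

/* Visit min(count, RING_SIZE) entries, oldest first. */
#define for_each_slot(i, n, r)						\
	for ((n) = 0, (i) = (r)->count < RING_SIZE ?			\
			0 : (r)->count & RING_MASK;			\
	     (n) < ((r)->count < RING_SIZE ? (r)->count : RING_SIZE);	\
	     (n)++, (i) = ((i) + 1) & RING_MASK)

static void ring_push(struct ring *r, int v)
{
	r->slots[r->count & RING_MASK] = v;
	r->count++;
}

int main(void)
{
	struct ring r = { 0 };
	unsigned int i, n;

	for (int v = 1; v <= 11; v++)	/* 11 pushes wrap the 8 slots */
		ring_push(&r, v);
	for_each_slot(i, n, &r)
		printf("%d ", r.slots[i]);	/* 4 5 6 7 8 9 10 11 */
	printf("\n");
	return 0;
}
```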
/kernel/cgroup/
misc.c
143 struct misc_cg *i, *j; in misc_cg_try_charge() local
154 for (i = cg; i; i = parent_misc(i)) { in misc_cg_try_charge()
155 res = &i->res[type]; in misc_cg_try_charge()
163 pr_cont_cgroup_path(i->css.cgroup); in misc_cg_try_charge()
174 for (j = cg; j != i; j = parent_misc(j)) in misc_cg_try_charge()
176 misc_cg_cancel_charge(type, i, amount); in misc_cg_try_charge()
192 struct misc_cg *i; in misc_cg_uncharge() local
197 for (i = cg; i; i = parent_misc(i)) in misc_cg_uncharge()
198 misc_cg_cancel_charge(type, i, amount); in misc_cg_uncharge()
212 int i; in misc_cg_max_show() local
[all …]
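misc_cg_try_charge() charges every level from the starting cgroup up to the root, and on failure walks the same path again to uncharge exactly the levels already charged (j runs from the start until it meets the failing node i, which is then uncharged separately). A simplified sketch with a plain parent-pointer tree and no locking; the names are illustrative:

```c
#include <stdio.h>

struct group {
	struct group *parent;
	long usage, max;
};

static int try_charge(struct group *g, long amount)
{
	struct group *i, *j;

	for (i = g; i; i = i->parent) {
		i->usage += amount;
		if (i->usage > i->max)
			goto err;
	}
	return 0;
err:
	for (j = g; j != i; j = j->parent)	/* undo the successful levels */
		j->usage -= amount;
	i->usage -= amount;			/* ... and the failing one */
	return -1;
}

int main(void)
{
	struct group root = { NULL, 0, 10 };
	struct group child = { &root, 0, 100 };

	printf("%d\n", try_charge(&child, 5));	/* 0: fits everywhere */
	printf("%d\n", try_charge(&child, 8));	/* -1: root would exceed 10 */
	printf("%ld %ld\n", child.usage, root.usage);	/* 5 5: rolled back */
	return 0;
}
```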
/kernel/trace/
tracing_map.c
39 void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n) in tracing_map_update_sum() argument
41 atomic64_add(n, &elt->fields[i].sum); in tracing_map_update_sum()
56 u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i) in tracing_map_read_sum() argument
58 return (u64)atomic64_read(&elt->fields[i].sum); in tracing_map_read_sum()
71 void tracing_map_set_var(struct tracing_map_elt *elt, unsigned int i, u64 n) in tracing_map_set_var() argument
73 atomic64_set(&elt->vars[i], n); in tracing_map_set_var()
74 elt->var_set[i] = true; in tracing_map_set_var()
86 bool tracing_map_var_set(struct tracing_map_elt *elt, unsigned int i) in tracing_map_var_set() argument
88 return elt->var_set[i]; in tracing_map_var_set()
103 u64 tracing_map_read_var(struct tracing_map_elt *elt, unsigned int i) in tracing_map_read_var() argument
[all …]
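The tracing_map accessors above are deliberately thin: each per-field sum and variable lives in an atomic so concurrent tracers can update elements without locks. A minimal userspace analogue using C11 atomics; the struct layout and names here are assumptions, not the kernel's:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NFIELDS 4

struct map_elt {
	_Atomic uint64_t sum[NFIELDS];
	_Atomic uint64_t var[NFIELDS];
	_Atomic _Bool var_set[NFIELDS];
};

static void elt_update_sum(struct map_elt *e, unsigned int i, uint64_t n)
{
	atomic_fetch_add_explicit(&e->sum[i], n, memory_order_relaxed);
}

static void elt_set_var(struct map_elt *e, unsigned int i, uint64_t n)
{
	atomic_store_explicit(&e->var[i], n, memory_order_relaxed);
	atomic_store_explicit(&e->var_set[i], 1, memory_order_relaxed);
}

int main(void)
{
	struct map_elt e = { 0 };

	elt_update_sum(&e, 0, 7);
	elt_update_sum(&e, 0, 5);
	elt_set_var(&e, 1, 42);
	printf("sum=%llu set=%d\n",
	       (unsigned long long)atomic_load(&e.sum[0]),
	       (int)atomic_load(&e.var_set[1]));	/* sum=12 set=1 */
	return 0;
}
```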
trace_events_inject.c
41 int s, i = 0; in parse_field() local
45 if (!str[i]) in parse_field()
48 while (isspace(str[i])) in parse_field()
49 i++; in parse_field()
50 s = i; in parse_field()
51 while (isalnum(str[i]) || str[i] == '_') in parse_field()
52 i++; in parse_field()
53 len = i - s; in parse_field()
66 while (isspace(str[i])) in parse_field()
67 i++; in parse_field()
[all …]
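parse_field() tokenizes "field=value" input by hand: skip leading whitespace, then take the longest run of alphanumerics and underscores as the field name. A standalone sketch of just that scanning idiom (the value side and error handling are omitted):

```c
#include <ctype.h>
#include <stdio.h>

/* Return the length of the field name and store its start in *out. */
static int scan_name(const char *str, const char **out)
{
	int i = 0, s;

	while (isspace((unsigned char)str[i]))	/* skip leading spaces */
		i++;
	s = i;					/* name starts here */
	while (isalnum((unsigned char)str[i]) || str[i] == '_')
		i++;
	*out = str + s;
	return i - s;
}

int main(void)
{
	const char *name;
	int len = scan_name("   comm_field = foo", &name);

	printf("%.*s (%d)\n", len, name, len);	/* comm_field (10) */
	return 0;
}
```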
trace_events_filter.c
425 int i; in predicate_parse() local
562 for (i = N-1 ; i--; ) { in predicate_parse()
563 int target = prog[i].target; in predicate_parse()
564 if (prog[i].when_to_branch == prog[target].when_to_branch) in predicate_parse()
565 prog[i].target = prog[target].target; in predicate_parse()
569 for (i = 0; i < N; i++) { in predicate_parse()
570 invert = inverts[i] ^ prog[i].when_to_branch; in predicate_parse()
571 prog[i].when_to_branch = invert; in predicate_parse()
573 if (WARN_ON(prog[i].target <= i)) { in predicate_parse()
586 for (i = 0; prog_stack[i].pred; i++) in predicate_parse()
[all …]
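The backwards pass at line 562 above is a branch-threading optimization: if a branch's target branches with the same polarity, the first branch can jump straight to the target's own destination, and because later entries are fixed up first, whole chains collapse in one sweep. A sketch of that pass on a made-up predicate program:

```c
#include <stdio.h>

struct pred {
	int target;		/* index to jump to when the branch is taken */
	int when_to_branch;	/* branch polarity: on true (1) or false (0) */
};

int main(void)
{
	/* entries 2 and 1 branch the same way, so 2 can thread straight
	 * through to 1's destination */
	struct pred prog[] = { { 2, 0 }, { 3, 1 }, { 1, 1 }, { 3, 0 } };
	int N = (int)(sizeof(prog) / sizeof(prog[0]));

	for (int i = N - 1; i--; ) {	/* body sees i = N-2 down to 0 */
		int target = prog[i].target;

		if (prog[i].when_to_branch == prog[target].when_to_branch)
			prog[i].target = prog[target].target;
	}
	printf("prog[2].target = %d\n", prog[2].target);	/* 3, not 1 */
	return 0;
}
```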
trace_events_synth.c
119 unsigned int i, size, n_u64; in synth_event_define_fields() local
124 for (i = 0, n_u64 = 0; i < event->n_fields; i++) { in synth_event_define_fields()
125 size = event->fields[i]->size; in synth_event_define_fields()
126 is_signed = event->fields[i]->is_signed; in synth_event_define_fields()
127 type = event->fields[i]->type; in synth_event_define_fields()
128 name = event->fields[i]->name; in synth_event_define_fields()
134 event->fields[i]->offset = n_u64; in synth_event_define_fields()
136 if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) { in synth_event_define_fields()
334 unsigned int i, n_u64; in print_synth_event() local
343 for (i = 0, n_u64 = 0; i < se->n_fields; i++) { in print_synth_event()
[all …]
trace_stack.c
39 long i; in print_max_stack() local
46 for (i = 0; i < stack_trace_nr_entries; i++) { in print_max_stack()
47 if (i + 1 == stack_trace_nr_entries) in print_max_stack()
48 size = stack_trace_index[i]; in print_max_stack()
50 size = stack_trace_index[i] - stack_trace_index[i+1]; in print_max_stack()
52 pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i], in print_max_stack()
53 size, (void *)stack_dump_trace[i]); in print_max_stack()
160 int i, x; in check_stack() local
196 for (i = 0; i < stack_trace_nr_entries; i++) { in check_stack()
197 if (stack_dump_trace[i] == ip) in check_stack()
[all …]
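print_max_stack() recovers each frame's stack usage from a cumulative table: stack_trace_index[i] is the depth remaining at frame i, so one frame's size is the difference to the next deeper entry, and the final entry is its own size. A self-contained sketch of that subtraction on made-up data:

```c
#include <stdio.h>

int main(void)
{
	/* stack depth remaining below each frame, outermost first */
	int index[] = { 400, 272, 144, 48 };
	int n = sizeof(index) / sizeof(index[0]);

	for (int i = 0; i < n; i++) {
		/* last frame: the whole remainder is its own usage */
		int size = (i + 1 == n) ? index[i] : index[i] - index[i + 1];

		printf("%3d) %8d %5d\n", i, index[i], size);
	}
	return 0;
}
```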
trace_events_hist.c
430 #define for_each_hist_field(i, hist_data) \ argument
431 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
433 #define for_each_hist_val_field(i, hist_data) \ argument
434 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
436 #define for_each_hist_key_field(i, hist_data) \ argument
437 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
870 unsigned int i; in find_var_ref() local
872 for (i = 0; i < hist_data->n_var_refs; i++) { in find_var_ref()
873 hist_field = hist_data->var_refs[i]; in find_var_ref()
930 int i; in check_var_refs() local
[all …]
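The three macros above encode one layout rule: a histogram's fields are stored values-first, keys-after in a single array, so value fields occupy indices [0, n_vals) and key fields occupy [n_vals, n_fields). A sketch with illustrative names:

```c
#include <stdio.h>

struct hist {
	unsigned int n_vals;	/* value fields come first ... */
	unsigned int n_fields;	/* ... key fields fill out the rest */
	const char *names[8];
};

#define for_each_val_field(i, h) \
	for ((i) = 0; (i) < (h)->n_vals; (i)++)

#define for_each_key_field(i, h) \
	for ((i) = (h)->n_vals; (i) < (h)->n_fields; (i)++)

int main(void)
{
	struct hist h = { 2, 4, { "hitcount", "bytes", "pid", "comm" } };
	unsigned int i;

	for_each_val_field(i, &h)
		printf("val: %s\n", h.names[i]);	/* hitcount, bytes */
	for_each_key_field(i, &h)
		printf("key: %s\n", h.names[i]);	/* pid, comm */
	return 0;
}
```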
/kernel/
range.c
32 int i; in add_range_with_merge() local
38 for (i = 0; i < nr_range; i++) { in add_range_with_merge()
41 if (!range[i].end) in add_range_with_merge()
44 common_start = max(range[i].start, start); in add_range_with_merge()
45 common_end = min(range[i].end, end); in add_range_with_merge()
50 start = min(range[i].start, start); in add_range_with_merge()
51 end = max(range[i].end, end); in add_range_with_merge()
53 memmove(&range[i], &range[i + 1], in add_range_with_merge()
54 (nr_range - (i + 1)) * sizeof(range[i])); in add_range_with_merge()
58 i--; in add_range_with_merge()
[all …]
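add_range_with_merge() grows the incoming [start, end] to swallow any overlapping entry, deletes that entry by memmove()-ing the array tail down, and decrements i so the slot just shifted into is re-examined. A compact sketch of the merge step; unlike the kernel version it does not skip empty slots or bounds-check the array:

```c
#include <stdio.h>
#include <string.h>

struct range {
	unsigned long start, end;
};

static int add_range_with_merge(struct range *r, int nr,
				unsigned long start, unsigned long end)
{
	for (int i = 0; i < nr; i++) {
		if (r[i].end < start || r[i].start > end)
			continue;	/* disjoint, keep looking */
		start = r[i].start < start ? r[i].start : start;
		end = r[i].end > end ? r[i].end : end;
		memmove(&r[i], &r[i + 1], (nr - (i + 1)) * sizeof(r[i]));
		nr--;
		i--;		/* re-examine the slot we just shifted into */
	}
	r[nr].start = start;	/* append the (possibly grown) range */
	r[nr].end = end;
	return nr + 1;
}

int main(void)
{
	struct range r[8] = { { 0, 10 }, { 20, 30 } };
	int nr = add_range_with_merge(r, 2, 5, 25);	/* bridges both */

	printf("%d: [%lu,%lu]\n", nr, r[0].start, r[0].end);	/* 1: [0,30] */
	return 0;
}
```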
latencytop.c
90 int i; in account_global_scheduler_latency() local
96 for (i = 0; i < MAXLR; i++) { in account_global_scheduler_latency()
100 if (!latency_record[i].backtrace[0]) { in account_global_scheduler_latency()
101 if (firstnonnull > i) in account_global_scheduler_latency()
102 firstnonnull = i; in account_global_scheduler_latency()
108 if (latency_record[i].backtrace[q] != record) { in account_global_scheduler_latency()
118 latency_record[i].count++; in account_global_scheduler_latency()
119 latency_record[i].time += lat->time; in account_global_scheduler_latency()
120 if (lat->time > latency_record[i].max) in account_global_scheduler_latency()
121 latency_record[i].max = lat->time; in account_global_scheduler_latency()
[all …]
profile.c
257 int i, j, cpu; in profile_flip_buffers() local
265 for (i = 0; i < NR_PROFILE_HIT; ++i) { in profile_flip_buffers()
266 if (!hits[i].hits) { in profile_flip_buffers()
267 if (hits[i].pc) in profile_flip_buffers()
268 hits[i].pc = 0; in profile_flip_buffers()
271 atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); in profile_flip_buffers()
272 hits[i].hits = hits[i].pc = 0; in profile_flip_buffers()
280 int i, cpu; in profile_discard_flip_buffers() local
283 i = per_cpu(cpu_profile_flip, get_cpu()); in profile_discard_flip_buffers()
287 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; in profile_discard_flip_buffers()
[all …]
auditfilter.c
83 int i; in audit_free_rule() local
90 for (i = 0; i < erule->field_count; i++) in audit_free_rule()
91 audit_free_lsm_field(&erule->fields[i]); in audit_free_rule()
198 int i; in audit_match_class_bits() local
201 for (i = 0; i < AUDIT_BITMASK_SIZE; i++) in audit_match_class_bits()
202 if (mask[i] & classes[class][i]) in audit_match_class_bits()
239 int i, err; in audit_to_entry_common() local
277 for (i = 0; i < AUDIT_BITMASK_SIZE; i++) in audit_to_entry_common()
278 entry->rule.mask[i] = rule->mask[i]; in audit_to_entry_common()
280 for (i = 0; i < AUDIT_SYSCALL_CLASSES; i++) { in audit_to_entry_common()
[all …]
kexec_file.c
333 int ret = 0, i; in SYSCALL_DEFINE5() local
388 for (i = 0; i < image->nr_segments; i++) { in SYSCALL_DEFINE5()
391 ksegment = &image->segment[i]; in SYSCALL_DEFINE5()
393 i, ksegment->buf, ksegment->bufsz, ksegment->mem, in SYSCALL_DEFINE5()
396 ret = kimage_load_segment(image, &image->segment[i]); in SYSCALL_DEFINE5()
527 u64 i; in kexec_walk_memblock() local
535 for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE, in kexec_walk_memblock()
549 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, in kexec_walk_memblock()
690 int ret = 0, i, j, zero_buf_sz, sha_region_sz; in kexec_calculate_store_digests() local
735 for (j = i = 0; i < image->nr_segments; i++) { in kexec_calculate_store_digests()
[all …]
scftorture.c
158 int i; in scf_torture_stats_print() local
165 for (i = 0; i < nthreads; i++) { in scf_torture_stats_print()
166 scfs.n_resched += scf_stats_p[i].n_resched; in scf_torture_stats_print()
167 scfs.n_single += scf_stats_p[i].n_single; in scf_torture_stats_print()
168 scfs.n_single_ofl += scf_stats_p[i].n_single_ofl; in scf_torture_stats_print()
169 scfs.n_single_rpc += scf_stats_p[i].n_single_rpc; in scf_torture_stats_print()
170 scfs.n_single_wait += scf_stats_p[i].n_single_wait; in scf_torture_stats_print()
171 scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl; in scf_torture_stats_print()
172 scfs.n_many += scf_stats_p[i].n_many; in scf_torture_stats_print()
173 scfs.n_many_wait += scf_stats_p[i].n_many_wait; in scf_torture_stats_print()
[all …]
kexec_core.c
151 int i; in sanity_check_segment_list() local
169 for (i = 0; i < nr_segments; i++) { in sanity_check_segment_list()
172 mstart = image->segment[i].mem; in sanity_check_segment_list()
173 mend = mstart + image->segment[i].memsz; in sanity_check_segment_list()
187 for (i = 0; i < nr_segments; i++) { in sanity_check_segment_list()
191 mstart = image->segment[i].mem; in sanity_check_segment_list()
192 mend = mstart + image->segment[i].memsz; in sanity_check_segment_list()
193 for (j = 0; j < i; j++) { in sanity_check_segment_list()
209 for (i = 0; i < nr_segments; i++) { in sanity_check_segment_list()
210 if (image->segment[i].bufsz > image->segment[i].memsz) in sanity_check_segment_list()
[all …]
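The overlap check in sanity_check_segment_list() is a quadratic pairwise pass: each segment i is compared only against the earlier segments j < i, so every pair is tested exactly once. A standalone sketch of that pass:

```c
#include <stdbool.h>
#include <stdio.h>

struct segment {
	unsigned long mem, memsz;
};

static bool segments_overlap(const struct segment *seg, int nr)
{
	for (int i = 0; i < nr; i++) {
		unsigned long mstart = seg[i].mem;
		unsigned long mend = mstart + seg[i].memsz;

		for (int j = 0; j < i; j++) {	/* earlier segments only */
			unsigned long pstart = seg[j].mem;
			unsigned long pend = pstart + seg[j].memsz;

			if (mend > pstart && mstart < pend)
				return true;	/* half-open ranges intersect */
		}
	}
	return false;
}

int main(void)
{
	struct segment ok[] = { { 0x1000, 0x1000 }, { 0x3000, 0x1000 } };
	struct segment bad[] = { { 0x1000, 0x2000 }, { 0x2000, 0x1000 } };

	printf("%d %d\n", segments_overlap(ok, 2),
	       segments_overlap(bad, 2));	/* 0 1 */
	return 0;
}
```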
kprobes.c
144 int i; in __get_insn_slot() local
145 for (i = 0; i < slots_per_page(c); i++) { in __get_insn_slot()
146 if (kip->slot_used[i] == SLOT_CLEAN) { in __get_insn_slot()
147 kip->slot_used[i] = SLOT_USED; in __get_insn_slot()
149 slot = kip->insns + (i * c->insn_size); in __get_insn_slot()
235 int i; in collect_garbage_slots() local
239 for (i = 0; i < slots_per_page(c); i++) { in collect_garbage_slots()
240 if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i)) in collect_garbage_slots()
487 int i; in get_optimized_kprobe() local
492 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++) in get_optimized_kprobe()
[all …]
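__get_insn_slot() hands out fixed-size instruction slots from a page by scanning a per-page state array for the first SLOT_CLEAN entry; collect_garbage_slots() later reclaims SLOT_DIRTY ones. A single-page, single-threaded sketch of the free-slot scan:

```c
#include <stdio.h>

enum slot_state { SLOT_CLEAN, SLOT_USED, SLOT_DIRTY };

#define PAGE_SIZE 4096
#define INSN_SIZE 16
#define SLOTS_PER_PAGE (PAGE_SIZE / INSN_SIZE)

static char page[PAGE_SIZE];
static enum slot_state slot_used[SLOTS_PER_PAGE];

static char *get_insn_slot(void)
{
	for (int i = 0; i < SLOTS_PER_PAGE; i++) {
		if (slot_used[i] == SLOT_CLEAN) {
			slot_used[i] = SLOT_USED;
			return page + i * INSN_SIZE;
		}
	}
	return NULL;	/* all slots taken; the kernel allocates a new page */
}

int main(void)
{
	char *a = get_insn_slot();
	char *b = get_insn_slot();

	printf("slot stride: %td\n", b - a);	/* 16 */
	return 0;
}
```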
/kernel/debug/
debug_core.c
287 int i; in kgdb_flush_swbreak_addr() local
289 for (i = 0; i < VMACACHE_SIZE; i++) { in kgdb_flush_swbreak_addr()
290 if (!current->vmacache.vmas[i]) in kgdb_flush_swbreak_addr()
292 flush_cache_range(current->vmacache.vmas[i], in kgdb_flush_swbreak_addr()
309 int i; in dbg_activate_sw_breakpoints() local
311 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { in dbg_activate_sw_breakpoints()
312 if (kgdb_break[i].state != BP_SET) in dbg_activate_sw_breakpoints()
315 error = kgdb_arch_set_breakpoint(&kgdb_break[i]); in dbg_activate_sw_breakpoints()
319 kgdb_break[i].bpt_addr); in dbg_activate_sw_breakpoints()
323 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr); in dbg_activate_sw_breakpoints()
[all …]
/kernel/sched/
topology.c
357 int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map); in build_perf_domains() local
394 for_each_cpu(i, cpu_map) { in build_perf_domains()
396 if (find_pd(pd, i)) in build_perf_domains()
400 tmp = pd_init(i); in build_perf_domains()
868 int i; in build_balance_mask() local
872 for_each_cpu(i, sg_span) { in build_balance_mask()
873 sibling = *per_cpu_ptr(sdd->sd, i); in build_balance_mask()
887 cpumask_set_cpu(i, mask); in build_balance_mask()
982 int i; in build_overlap_sched_groups() local
986 for_each_cpu_wrap(i, span, cpu) { in build_overlap_sched_groups()
[all …]
/kernel/debug/kdb/
kdb_bp.c
176 int i; in kdb_bp_install() local
178 for (i = 0; i < KDB_MAXBPT; i++) { in kdb_bp_install()
179 kdb_bp_t *bp = &kdb_breakpoints[i]; in kdb_bp_install()
183 __func__, i, bp->bp_enabled); in kdb_bp_install()
207 int i; in kdb_bp_remove() local
209 for (i = KDB_MAXBPT - 1; i >= 0; i--) { in kdb_bp_remove()
210 kdb_bp_t *bp = &kdb_breakpoints[i]; in kdb_bp_remove()
214 __func__, i, bp->bp_enabled); in kdb_bp_remove()
238 static void kdb_printbp(kdb_bp_t *bp, int i) in kdb_printbp() argument
241 kdb_printf("BP #%d at ", i); in kdb_printbp()
[all …]
/kernel/bpf/
verifier.c
270 u32 i, nr_linfo; in find_linfo() local
279 for (i = 1; i < nr_linfo; i++) in find_linfo()
280 if (insn_off < linfo[i].insn_off) in find_linfo()
283 return &linfo[i - 1]; in find_linfo()
634 int i; in print_verifier_state() local
638 for (i = 0; i < MAX_BPF_REG; i++) { in print_verifier_state()
639 reg = &state->regs[i]; in print_verifier_state()
643 verbose(env, " R%d", i); in print_verifier_state()
716 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in print_verifier_state()
722 if (state->stack[i].slot_type[j] != STACK_INVALID) in print_verifier_state()
[all …]
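find_linfo() answers "which line-info record covers instruction insn_off?" by scanning a table sorted by insn_off for the first entry past the offset and returning its predecessor. A standalone sketch of the same lookup (linear, like the excerpt):

```c
#include <stdio.h>

struct linfo {
	unsigned int insn_off;	/* table must be sorted by this field */
	int line;
};

static const struct linfo *find_linfo(const struct linfo *linfo,
				      unsigned int nr, unsigned int insn_off)
{
	unsigned int i;

	for (i = 1; i < nr; i++)
		if (insn_off < linfo[i].insn_off)
			break;
	return &linfo[i - 1];	/* last entry with insn_off <= target */
}

int main(void)
{
	const struct linfo table[] = { { 0, 10 }, { 4, 11 }, { 9, 14 } };

	printf("line %d\n", find_linfo(table, 3, 6)->line);	/* line 11 */
	return 0;
}
```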
devmap.c
93 int i; in dev_map_create_hash() local
98 for (i = 0; i < entries; i++) in dev_map_create_hash()
99 INIT_HLIST_HEAD(&hash[i]); in dev_map_create_hash()
185 int i; in dev_map_free() local
208 for (i = 0; i < dtab->n_buckets; i++) { in dev_map_free()
213 head = dev_map_index_hash(dtab, i); in dev_map_free()
226 for (i = 0; i < dtab->map.max_entries; i++) { in dev_map_free()
229 dev = rcu_dereference_raw(dtab->netdev_map[i]); in dev_map_free()
287 int i = 0; in dev_map_hash_get_next_key() local
306 i = idx & (dtab->n_buckets - 1); in dev_map_hash_get_next_key()
[all …]
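dev_map_create_hash() and dev_map_hash_get_next_key() rely on the bucket count being a power of two, so a key folds into a bucket with idx & (n_buckets - 1) rather than a modulo. A minimal sketch of that indexing, with the hlist heads replaced by plain counters and error handling elided:

```c
#include <stdio.h>
#include <stdlib.h>

struct hmap {
	unsigned int n_buckets;	/* must be a power of two */
	unsigned int *buckets;
};

static struct hmap *hmap_create(unsigned int entries)
{
	struct hmap *m = malloc(sizeof(*m));

	m->n_buckets = entries;
	m->buckets = calloc(entries, sizeof(*m->buckets));
	return m;
}

static unsigned int hmap_index(const struct hmap *m, unsigned int idx)
{
	return idx & (m->n_buckets - 1);	/* cheap power-of-two modulo */
}

int main(void)
{
	struct hmap *m = hmap_create(16);

	m->buckets[hmap_index(m, 35)]++;	/* 35 & 15 == 3 */
	printf("bucket 3: %u\n", m->buckets[3]);
	free(m->buckets);
	free(m);
	return 0;
}
```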
