/kernel/rcu/

refscale.c
    146: int i;   in ref_rcu_read_section() [local]
    148: for (i = nloops; i >= 0; i--) {   in ref_rcu_read_section()
    156: int i;   in ref_rcu_delay_section() [local]
    158: for (i = nloops; i >= 0; i--) {   in ref_rcu_delay_section()
    182: int i;   in srcu_ref_scale_read_section() [local]
    185: for (i = nloops; i >= 0; i--) {   in srcu_ref_scale_read_section()
    193: int i;   in srcu_ref_scale_delay_section() [local]
    196: for (i = nloops; i >= 0; i--) {   in srcu_ref_scale_delay_section()
    216: int i;   in rcu_tasks_ref_scale_read_section() [local]
    218: for (i = nloops; i >= 0; i--)   in rcu_tasks_ref_scale_read_section()
    [all …]

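Every refscale.c match above shares one measurement idiom: a countdown loop of read-side critical sections, `for (i = nloops; i >= 0; i--)`. A minimal userspace sketch of that idiom, assuming a pthread rwlock as a stand-in for the kernel's read-side primitives; `nloops` and the timing harness are illustrative, not taken from the file:

```c
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static void read_section(long nloops)
{
	long i;

	/* Counting down to 0 inclusive gives nloops + 1 passes, matching
	 * the ">= 0" bound in the kernel loop. */
	for (i = nloops; i >= 0; i--) {
		pthread_rwlock_rdlock(&lock);
		pthread_rwlock_unlock(&lock);
	}
}

int main(void)
{
	struct timespec t0, t1;
	long nloops = 1000000;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	read_section(nloops);
	clock_gettime(CLOCK_MONOTONIC, &t1);

	double ns = (t1.tv_sec - t0.tv_sec) * 1e9 + (t1.tv_nsec - t0.tv_nsec);
	printf("%.1f ns per read-side pass\n", ns / (nloops + 1));
	return 0;
}
```
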
rcu_segcblist.c
    101: int i;   in rcu_segcblist_n_segment_cbs() [local]
    103: for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)   in rcu_segcblist_n_segment_cbs()
    104: len += rcu_segcblist_get_seglen(rsclp, i);   in rcu_segcblist_n_segment_cbs()
    239: int i;   in rcu_segcblist_init() [local]
    244: for (i = 0; i < RCU_CBLIST_NSEGS; i++) {   in rcu_segcblist_init()
    245: rsclp->tails[i] = &rsclp->head;   in rcu_segcblist_init()
    246: rcu_segcblist_set_seglen(rsclp, i, 0);   in rcu_segcblist_init()
    363: int i;   in rcu_segcblist_entrain() [local]
    370: for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)   in rcu_segcblist_entrain()
    371: if (rsclp->tails[i] != rsclp->tails[i - 1])   in rcu_segcblist_entrain()
    [all …]

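The rcu_segcblist.c matches show two loops over the list's segments: summing per-segment lengths, and pointing every tail at the list head at init. A minimal sketch of that segmented-list shape; the enum and struct definitions below are simplified stand-ins, not the kernel's types:

```c
#include <stddef.h>
#include <stdio.h>

enum { DONE_TAIL, WAIT_TAIL, NEXT_READY_TAIL, NEXT_TAIL, NSEGS };

struct cb {
	struct cb *next;
};

struct segcblist {
	struct cb *head;
	struct cb **tails[NSEGS];	/* tails[i]: where segment i ends */
	long seglen[NSEGS];
};

static void segcblist_init(struct segcblist *l)
{
	int i;

	l->head = NULL;
	for (i = 0; i < NSEGS; i++) {	/* mirrors lines 244-246 */
		l->tails[i] = &l->head;	/* empty segments all end at head */
		l->seglen[i] = 0;
	}
}

static long segcblist_n_cbs(const struct segcblist *l)
{
	long len = 0;
	int i;

	for (i = DONE_TAIL; i < NSEGS; i++)	/* mirrors lines 103-104 */
		len += l->seglen[i];
	return len;
}

int main(void)
{
	struct segcblist l;

	segcblist_init(&l);
	printf("callbacks queued: %ld\n", segcblist_n_cbs(&l));
	return 0;
}
```
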
rcuscale.c
    410: int i = 0;   in rcu_scale_writer() [local]
    450: wdp = &wdpp[i];   in rcu_scale_writer()
    473: i_max = i;   in rcu_scale_writer()
    477: if (!done && i >= MIN_MEAS) {   in rcu_scale_writer()
    504: if (started && !alldone && i < MAX_MEAS - 1)   in rcu_scale_writer()
    505: i++;   in rcu_scale_writer()
    567: int i, loop = 0;   in kfree_scale_thread() [local]
    596: for (i = 0; i < kfree_alloc_num; i++) {   in kfree_scale_thread()
    641: int i;   in kfree_scale_cleanup() [local]
    647: for (i = 0; i < kfree_nrealthreads; i++)   in kfree_scale_cleanup()
    [all …]

rcutorture.c
    261: unsigned int i = READ_ONCE(rcu_torture_writer_state);   in rcu_torture_writer_state_getname() [local]
    263: if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))   in rcu_torture_writer_state_getname()
    265: return rcu_torture_writer_state_names[i];   in rcu_torture_writer_state_getname()
    451: int i;   in rcu_torture_pipe_update_one() [local]
    458: i = READ_ONCE(rp->rtort_pipe_count);   in rcu_torture_pipe_update_one()
    459: if (i > RCU_TORTURE_PIPE_LEN)   in rcu_torture_pipe_update_one()
    460: i = RCU_TORTURE_PIPE_LEN;   in rcu_torture_pipe_update_one()
    461: atomic_inc(&rcu_torture_wcount[i]);   in rcu_torture_pipe_update_one()
    462: WRITE_ONCE(rp->rtort_pipe_count, i + 1);   in rcu_torture_pipe_update_one()
    1343: int i;   in rcu_torture_writer() [local]
    [all …]

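Lines 458-462 show a clamp-then-count idiom: a per-object counter indexes a bounded histogram, so it is clamped to the last bucket before use. A small standalone sketch of the same idiom, assuming a made-up `PIPE_LEN`; the kernel's READ_ONCE/WRITE_ONCE and atomics are dropped since this version is single-threaded:

```c
#include <stdio.h>

#define PIPE_LEN 10

static long wcount[PIPE_LEN + 1];	/* last bucket absorbs overflow */

static void pipe_update_one(int *pipe_count)
{
	int i = *pipe_count;

	if (i > PIPE_LEN)	/* clamp, mirroring lines 459-460 */
		i = PIPE_LEN;
	wcount[i]++;
	*pipe_count = i + 1;
}

int main(void)
{
	int count = 0;

	for (int pass = 0; pass < 15; pass++)
		pipe_update_one(&count);
	for (int i = 0; i <= PIPE_LEN; i++)
		printf("bucket %2d: %ld\n", i, wcount[i]);
	return 0;
}
```
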
/kernel/cgroup/

misc.c
    143: struct misc_cg *i, *j;   in misc_cg_try_charge() [local]
    154: for (i = cg; i; i = parent_misc(i)) {   in misc_cg_try_charge()
    155: res = &i->res[type];   in misc_cg_try_charge()
    167: for (j = i; j; j = parent_misc(j)) {   in misc_cg_try_charge()
    172: for (j = cg; j != i; j = parent_misc(j))   in misc_cg_try_charge()
    174: misc_cg_cancel_charge(type, i, amount);   in misc_cg_try_charge()
    190: struct misc_cg *i;   in misc_cg_uncharge() [local]
    195: for (i = cg; i; i = parent_misc(i))   in misc_cg_uncharge()
    196: misc_cg_cancel_charge(type, i, amount);   in misc_cg_uncharge()
    210: int i;   in misc_cg_max_show() [local]
    [all …]

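The misc.c matches outline a hierarchical charge with rollback: walk from the charged cgroup up through its ancestors, and on failure walk up again from the start, undoing the partial charges (lines 172-174). A simplified single-threaded sketch of that walk-and-roll-back shape; `struct node` is hypothetical, and where the kernel charges atomically before checking (and so also cancels the failing level, line 174), this version checks first and excludes the failing node:

```c
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *parent;
	long usage, max;
};

static void cancel_charge(struct node *n, long amount)
{
	n->usage -= amount;
}

static bool try_charge(struct node *cg, long amount)
{
	struct node *i, *j;

	for (i = cg; i; i = i->parent) {	/* mirrors line 154 */
		if (i->usage + amount > i->max)
			goto err;
		i->usage += amount;
	}
	return true;
err:
	/* Undo the levels charged so far: from cg up to, but not
	 * including, the node i that refused the charge. */
	for (j = cg; j != i; j = j->parent)	/* mirrors line 172 */
		cancel_charge(j, amount);
	return false;
}

int main(void)
{
	struct node root = { .max = 100 };
	struct node child = { .parent = &root, .max = 50 };

	printf("charge 40: %s\n", try_charge(&child, 40) ? "ok" : "over limit");
	printf("charge 40: %s\n", try_charge(&child, 40) ? "ok" : "over limit");
	printf("child usage after rollback: %ld\n", child.usage);
	return 0;
}
```
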
/kernel/irq/

timings.c
    281: #define for_each_irqts(i, irqts) \   [macro argument]
    282: for (i = irqts->count < IRQ_TIMINGS_SIZE ? \
    287: i = (i + 1) & IRQ_TIMINGS_MASK)
    384: int index, i, period_max, count, start, min = INT_MAX;   in __irq_timings_next_event() [local]
    421: for (i = 0; i < count; i++) {   in __irq_timings_next_event()
    422: int index = (start + i) & IRQ_TIMINGS_MASK;   in __irq_timings_next_event()
    424: irqs->timings[i] = irqs->circ_timings[index];   in __irq_timings_next_event()
    425: min = min_t(int, irqs->timings[i], min);   in __irq_timings_next_event()
    542: int i, irq = 0;   in irq_timings_next_event() [local]
    568: for_each_irqts(i, irqts) {   in irq_timings_next_event()
    [all …]

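Both for_each_irqts() and the copy loop at lines 421-425 walk a power-of-two circular buffer with a mask instead of a modulo, starting at the oldest slot once the buffer has wrapped. A compact sketch of that masked-walk idiom; the buffer size and names are illustrative:

```c
#include <stdio.h>

#define TIMINGS_SIZE 8			/* must be a power of two */
#define TIMINGS_MASK (TIMINGS_SIZE - 1)

struct timings {
	unsigned long count;		/* total values ever stored */
	unsigned long buf[TIMINGS_SIZE];
};

static void push(struct timings *t, unsigned long v)
{
	t->buf[t->count & TIMINGS_MASK] = v;
	t->count++;
}

static void walk_oldest_first(const struct timings *t)
{
	/* Before wrap: start at 0; after wrap: the write cursor is the
	 * oldest slot (same choice as the for_each_irqts() initializer). */
	unsigned long n = t->count < TIMINGS_SIZE ? t->count : TIMINGS_SIZE;
	unsigned long start = t->count < TIMINGS_SIZE ? 0 : t->count & TIMINGS_MASK;

	for (unsigned long k = 0; k < n; k++) {
		unsigned long i = (start + k) & TIMINGS_MASK; /* mirrors line 422 */
		printf("%lu ", t->buf[i]);
	}
	printf("\n");
}

int main(void)
{
	struct timings t = { 0 };

	for (unsigned long v = 1; v <= 11; v++)
		push(&t, v);
	walk_oldest_first(&t);	/* prints: 4 5 6 7 8 9 10 11 */
	return 0;
}
```
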
/kernel/trace/

tracing_map.c
     39: void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n)   in tracing_map_update_sum() [argument]
     41: atomic64_add(n, &elt->fields[i].sum);   in tracing_map_update_sum()
     56: u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)   in tracing_map_read_sum() [argument]
     58: return (u64)atomic64_read(&elt->fields[i].sum);   in tracing_map_read_sum()
     71: void tracing_map_set_var(struct tracing_map_elt *elt, unsigned int i, u64 n)   in tracing_map_set_var() [argument]
     73: atomic64_set(&elt->vars[i], n);   in tracing_map_set_var()
     74: elt->var_set[i] = true;   in tracing_map_set_var()
     86: bool tracing_map_var_set(struct tracing_map_elt *elt, unsigned int i)   in tracing_map_var_set() [argument]
     88: return elt->var_set[i];   in tracing_map_var_set()
    103: u64 tracing_map_read_var(struct tracing_map_elt *elt, unsigned int i)   in tracing_map_read_var() [argument]
    [all …]

trace_events_inject.c
     41: int s, i = 0;   in parse_field() [local]
     45: if (!str[i])   in parse_field()
     48: while (isspace(str[i]))   in parse_field()
     49: i++;   in parse_field()
     50: s = i;   in parse_field()
     51: while (isalnum(str[i]) || str[i] == '_')   in parse_field()
     52: i++;   in parse_field()
     53: len = i - s;   in parse_field()
     66: while (isspace(str[i]))   in parse_field()
     67: i++;   in parse_field()
    [all …]

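parse_field() scans a field name by skipping whitespace and then consuming alphanumeric or '_' characters, taking the length as the difference of two cursors (lines 48-53). A standalone sketch of that scan; the caller and input string are invented:

```c
#include <ctype.h>
#include <stdio.h>

/* Returns the identifier's length and stores its start through *start. */
static int scan_ident(const char *str, const char **start)
{
	int s, i = 0;

	while (isspace((unsigned char)str[i]))	/* mirrors lines 48-49 */
		i++;
	s = i;					/* mirrors line 50 */
	while (isalnum((unsigned char)str[i]) || str[i] == '_')
		i++;				/* mirrors lines 51-52 */
	*start = str + s;
	return i - s;				/* len = i - s, line 53 */
}

int main(void)
{
	const char *p;
	int len = scan_ident("   common_pid = 42", &p);

	printf("field: %.*s (len %d)\n", len, p, len);
	return 0;
}
```
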
trace_events_filter.c
    461: int i;   in predicate_parse() [local]
    598: for (i = N-1 ; i--; ) {   in predicate_parse()
    599: int target = prog[i].target;   in predicate_parse()
    600: if (prog[i].when_to_branch == prog[target].when_to_branch)   in predicate_parse()
    601: prog[i].target = prog[target].target;   in predicate_parse()
    605: for (i = 0; i < N; i++) {   in predicate_parse()
    606: invert = inverts[i] ^ prog[i].when_to_branch;   in predicate_parse()
    607: prog[i].when_to_branch = invert;   in predicate_parse()
    609: if (WARN_ON(prog[i].target <= i)) {   in predicate_parse()
    622: for (i = 0; prog_stack[i].pred; i++)   in predicate_parse()
    [all …]

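The backwards loop at lines 598-601 collapses chained short-circuit branches: if a branch jumps to another branch taken under the same condition, it can jump straight through to that branch's own target. A toy sketch of the collapse on a three-op program; the op encoding is simplified and the inverts[] pass of lines 605-607 is omitted:

```c
#include <stdbool.h>
#include <stdio.h>

struct op {
	int target;		/* index to jump to when the branch is taken */
	bool when_to_branch;	/* branch on true or on false */
};

static void collapse(struct op *prog, int n)
{
	/* Walk backwards so later entries are already collapsed
	 * (same effect as "for (i = N-1 ; i--; )" at line 598). */
	for (int i = n - 1; i-- > 0; ) {
		int target = prog[i].target;

		if (prog[i].when_to_branch == prog[target].when_to_branch)
			prog[i].target = prog[target].target;
	}
}

int main(void)
{
	/* op0 branches to op1; op1 branches, on the same sense, to op2. */
	struct op prog[3] = {
		{ .target = 1, .when_to_branch = true },
		{ .target = 2, .when_to_branch = true },
		{ .target = 2, .when_to_branch = false },
	};

	collapse(prog, 3);
	printf("op0 now targets %d\n", prog[0].target);	/* prints 2 */
	return 0;
}
```
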
trace_stack.c
     39: long i;   in print_max_stack() [local]
     46: for (i = 0; i < stack_trace_nr_entries; i++) {   in print_max_stack()
     47: if (i + 1 == stack_trace_nr_entries)   in print_max_stack()
     48: size = stack_trace_index[i];   in print_max_stack()
     50: size = stack_trace_index[i] - stack_trace_index[i+1];   in print_max_stack()
     52: pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i],   in print_max_stack()
     53: size, (void *)stack_dump_trace[i]);   in print_max_stack()
    160: int i, x;   in check_stack() [local]
    196: for (i = 0; i < stack_trace_nr_entries; i++) {   in check_stack()
    197: if (stack_dump_trace[i] == ip)   in check_stack()
    [all …]

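print_max_stack() derives each frame's size from a cumulative-depth array: adjacent entries are subtracted, and the last entry stands alone (lines 46-53). A tiny sketch of that computation with made-up depths:

```c
#include <stdio.h>

int main(void)
{
	/* Cumulative stack depth per frame, deepest frame first. */
	int index[] = { 96, 64, 40, 16 };
	long n = sizeof(index) / sizeof(index[0]);

	for (long i = 0; i < n; i++) {		/* mirrors lines 46-53 */
		int size;

		if (i + 1 == n)			/* last frame: own depth */
			size = index[i];
		else				/* otherwise: difference */
			size = index[i] - index[i + 1];
		printf("%3ld) %8d %5d\n", i, index[i], size);
	}
	return 0;
}
```
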
trace_events_synth.c
    138: unsigned int i, size, n_u64;   in synth_event_define_fields() [local]
    143: for (i = 0, n_u64 = 0; i < event->n_fields; i++) {   in synth_event_define_fields()
    144: size = event->fields[i]->size;   in synth_event_define_fields()
    145: is_signed = event->fields[i]->is_signed;   in synth_event_define_fields()
    146: type = event->fields[i]->type;   in synth_event_define_fields()
    147: name = event->fields[i]->name;   in synth_event_define_fields()
    153: event->fields[i]->offset = n_u64;   in synth_event_define_fields()
    155: if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {   in synth_event_define_fields()
    353: unsigned int i, n_u64;   in print_synth_event() [local]
    362: for (i = 0, n_u64 = 0; i < se->n_fields; i++) {   in print_synth_event()
    [all …]

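synth_event_define_fields() lays fields out in 64-bit words: `n_u64` is a running word count, and each field's offset is that count at the moment it is placed (line 153). A reduced sketch of the layout pass, assuming an invented field table and a plain round-up rule (the kernel's string and dynamic-field handling differ):

```c
#include <stdio.h>

struct field {
	const char *name;
	unsigned int size;	/* bytes */
	unsigned int offset;	/* in u64 words, filled in below */
};

int main(void)
{
	struct field fields[] = {
		{ "pid", 4 }, { "ts", 8 }, { "comm", 16 },
	};
	unsigned int n_fields = 3, n_u64 = 0;

	for (unsigned int i = 0; i < n_fields; i++) {
		fields[i].offset = n_u64;	/* mirrors line 153 */
		/* Round the field size up to whole u64 slots. */
		n_u64 += (fields[i].size + 7) / 8;
	}

	for (unsigned int i = 0; i < n_fields; i++)
		printf("%-5s at word %u\n", fields[i].name, fields[i].offset);
	printf("record uses %u words\n", n_u64);
	return 0;
}
```
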
trace_events_hist.c
    473: #define for_each_hist_field(i, hist_data) \   [macro argument]
    474: for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
    476: #define for_each_hist_val_field(i, hist_data) \   [macro argument]
    477: for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
    479: #define for_each_hist_key_field(i, hist_data) \   [macro argument]
    480: for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
    934: unsigned int i;   in find_var_ref() [local]
    936: for (i = 0; i < hist_data->n_var_refs; i++) {   in find_var_ref()
    937: hist_field = hist_data->var_refs[i];   in find_var_ref()
    994: int i;   in check_var_refs() [local]
    [all …]

/kernel/

range.c
     32: int i;   in add_range_with_merge() [local]
     38: for (i = 0; i < nr_range; i++) {   in add_range_with_merge()
     41: if (!range[i].end)   in add_range_with_merge()
     44: common_start = max(range[i].start, start);   in add_range_with_merge()
     45: common_end = min(range[i].end, end);   in add_range_with_merge()
     50: start = min(range[i].start, start);   in add_range_with_merge()
     51: end = max(range[i].end, end);   in add_range_with_merge()
     53: memmove(&range[i], &range[i + 1],   in add_range_with_merge()
     54: (nr_range - (i + 1)) * sizeof(range[i]));   in add_range_with_merge()
     58: i--;   in add_range_with_merge()
    [all …]

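add_range_with_merge() widens the incoming range to its union with any overlapping entry, deletes that entry with memmove(), and rescans from the same slot (the `i--` at line 58). A self-contained sketch of the same merge; it uses a plain overlap test, whereas the kernel also merges ranges that merely touch:

```c
#include <stdio.h>
#include <string.h>

struct range {
	unsigned long start, end;	/* end == 0 marks an empty slot */
};

static int add_range_with_merge(struct range *range, int nr,
				unsigned long start, unsigned long end)
{
	for (int i = 0; i < nr; i++) {
		if (!range[i].end)			/* empty slot */
			continue;

		unsigned long cs = range[i].start > start ? range[i].start : start;
		unsigned long ce = range[i].end < end ? range[i].end : end;

		if (cs > ce)				/* no overlap */
			continue;

		/* Take the union, then delete the old entry and rescan
		 * this slot (mirrors lines 50-58). */
		start = range[i].start < start ? range[i].start : start;
		end = range[i].end > end ? range[i].end : end;
		memmove(&range[i], &range[i + 1],
			(nr - (i + 1)) * sizeof(range[i]));
		nr--;
		i--;
	}
	range[nr].start = start;
	range[nr].end = end;
	return nr + 1;
}

int main(void)
{
	struct range r[8] = { { 0, 10 }, { 20, 30 } };
	int nr = add_range_with_merge(r, 2, 5, 25);	/* bridges both */

	printf("%d range(s): [%lu, %lu]\n", nr, r[0].start, r[0].end);
	return 0;
}
```
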
profile.c
    233: int i, j, cpu;   in profile_flip_buffers() [local]
    241: for (i = 0; i < NR_PROFILE_HIT; ++i) {   in profile_flip_buffers()
    242: if (!hits[i].hits) {   in profile_flip_buffers()
    243: if (hits[i].pc)   in profile_flip_buffers()
    244: hits[i].pc = 0;   in profile_flip_buffers()
    247: atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);   in profile_flip_buffers()
    248: hits[i].hits = hits[i].pc = 0;   in profile_flip_buffers()
    256: int i, cpu;   in profile_discard_flip_buffers() [local]
    259: i = per_cpu(cpu_profile_flip, get_cpu());   in profile_discard_flip_buffers()
    263: struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];   in profile_discard_flip_buffers()
    [all …]

latencytop.c
    116: int i;   in account_global_scheduler_latency() [local]
    122: for (i = 0; i < MAXLR; i++) {   in account_global_scheduler_latency()
    126: if (!latency_record[i].backtrace[0]) {   in account_global_scheduler_latency()
    127: if (firstnonnull > i)   in account_global_scheduler_latency()
    128: firstnonnull = i;   in account_global_scheduler_latency()
    134: if (latency_record[i].backtrace[q] != record) {   in account_global_scheduler_latency()
    144: latency_record[i].count++;   in account_global_scheduler_latency()
    145: latency_record[i].time += lat->time;   in account_global_scheduler_latency()
    146: if (lat->time > latency_record[i].max)   in account_global_scheduler_latency()
    147: latency_record[i].max = lat->time;   in account_global_scheduler_latency()
    [all …]

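account_global_scheduler_latency() scans for a record matching the backtrace while remembering the first free slot, so a miss can be inserted there; the kernel's `firstnonnull` variable, despite its name, tracks the first null slot. A reduced sketch keyed on a single backtrace word; the record layout is invented:

```c
#include <stdio.h>

#define MAXLR 4

struct lat_record {
	unsigned long backtrace0;	/* 0 means "slot unused" */
	unsigned long count, time, max;
};

static struct lat_record records[MAXLR];

static void account_latency(unsigned long backtrace0, unsigned long time)
{
	int firstnonnull = MAXLR;	/* kernel's name for first free slot */

	for (int i = 0; i < MAXLR; i++) {
		if (!records[i].backtrace0) {	/* mirrors lines 126-128 */
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		if (records[i].backtrace0 == backtrace0) {
			records[i].count++;	/* mirrors lines 144-147 */
			records[i].time += time;
			if (time > records[i].max)
				records[i].max = time;
			return;
		}
	}
	if (firstnonnull < MAXLR) {	/* no match: take the free slot */
		records[firstnonnull].backtrace0 = backtrace0;
		records[firstnonnull].count = 1;
		records[firstnonnull].time = records[firstnonnull].max = time;
	}
}

int main(void)
{
	account_latency(0xdeadbeef, 10);
	account_latency(0xdeadbeef, 30);
	printf("count=%lu total=%lu max=%lu\n",
	       records[0].count, records[0].time, records[0].max);
	return 0;
}
```
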
auditfilter.c
     85: int i;   in audit_free_rule() [local]
     92: for (i = 0; i < erule->field_count; i++)   in audit_free_rule()
     93: audit_free_lsm_field(&erule->fields[i]);   in audit_free_rule()
    201: int i;   in audit_match_class_bits() [local]
    204: for (i = 0; i < AUDIT_BITMASK_SIZE; i++)   in audit_match_class_bits()
    205: if (mask[i] & classes[class][i])   in audit_match_class_bits()
    242: int i, err;   in audit_to_entry_common() [local]
    281: for (i = 0; i < AUDIT_BITMASK_SIZE; i++)   in audit_to_entry_common()
    282: entry->rule.mask[i] = rule->mask[i];   in audit_to_entry_common()
    284: for (i = 0; i < AUDIT_SYSCALL_CLASSES; i++) {   in audit_to_entry_common()
    [all …]

kexec_file.c
    329: int ret = 0, i;   in SYSCALL_DEFINE5() [local]
    384: for (i = 0; i < image->nr_segments; i++) {   in SYSCALL_DEFINE5()
    387: ksegment = &image->segment[i];   in SYSCALL_DEFINE5()
    389: i, ksegment->buf, ksegment->bufsz, ksegment->mem,   in SYSCALL_DEFINE5()
    392: ret = kimage_load_segment(image, &image->segment[i]);   in SYSCALL_DEFINE5()
    523: u64 i;   in kexec_walk_memblock() [local]
    536: for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,   in kexec_walk_memblock()
    550: for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,   in kexec_walk_memblock()
    678: int ret = 0, i, j, zero_buf_sz, sha_region_sz;   in kexec_calculate_store_digests() [local]
    723: for (j = i = 0; i < image->nr_segments; i++) {   in kexec_calculate_store_digests()
    [all …]

scftorture.c
    154: int i;   in scf_torture_stats_print() [local]
    161: for (i = 0; i < nthreads; i++) {   in scf_torture_stats_print()
    162: scfs.n_resched += scf_stats_p[i].n_resched;   in scf_torture_stats_print()
    163: scfs.n_single += scf_stats_p[i].n_single;   in scf_torture_stats_print()
    164: scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;   in scf_torture_stats_print()
    165: scfs.n_single_rpc += scf_stats_p[i].n_single_rpc;   in scf_torture_stats_print()
    166: scfs.n_single_wait += scf_stats_p[i].n_single_wait;   in scf_torture_stats_print()
    167: scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl;   in scf_torture_stats_print()
    168: scfs.n_many += scf_stats_p[i].n_many;   in scf_torture_stats_print()
    169: scfs.n_many_wait += scf_stats_p[i].n_many_wait;   in scf_torture_stats_print()
    [all …]

/kernel/debug/

debug_core.c
    297: int i;   in dbg_activate_sw_breakpoints() [local]
    299: for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {   in dbg_activate_sw_breakpoints()
    300: if (kgdb_break[i].state != BP_SET)   in dbg_activate_sw_breakpoints()
    303: error = kgdb_arch_set_breakpoint(&kgdb_break[i]);   in dbg_activate_sw_breakpoints()
    307: kgdb_break[i].bpt_addr);   in dbg_activate_sw_breakpoints()
    311: kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);   in dbg_activate_sw_breakpoints()
    312: kgdb_break[i].state = BP_ACTIVE;   in dbg_activate_sw_breakpoints()
    322: int i;   in dbg_set_sw_break() [local]
    327: for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {   in dbg_set_sw_break()
    328: if ((kgdb_break[i].state == BP_SET) &&   in dbg_set_sw_break()
    [all …]

/kernel/sched/

topology.c
    377: int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);   in build_perf_domains() [local]
    414: for_each_cpu(i, cpu_map) {   in build_perf_domains()
    416: if (find_pd(pd, i))   in build_perf_domains()
    420: tmp = pd_init(i);   in build_perf_domains()
    896: int i;   in build_balance_mask() [local]
    900: for_each_cpu(i, sg_span) {   in build_balance_mask()
    901: sibling = *per_cpu_ptr(sdd->sd, i);   in build_balance_mask()
    915: cpumask_set_cpu(i, mask);   in build_balance_mask()
    1012: int i;   in build_overlap_sched_groups() [local]
    1016: for_each_cpu_wrap(i, span, cpu) {   in build_overlap_sched_groups()
    [all …]

/kernel/module/

sysfs.c
     74: unsigned int nloaded = 0, i, size[2];   in add_sect_attrs() [local]
     80: for (i = 0; i < info->hdr->e_shnum; i++)   in add_sect_attrs()
     81: if (!sect_empty(&info->sechdrs[i]))   in add_sect_attrs()
     97: for (i = 0; i < info->hdr->e_shnum; i++) {   in add_sect_attrs()
     98: Elf_Shdr *sec = &info->sechdrs[i];   in add_sect_attrs()
    161: unsigned int i)   in free_notes_attrs() [argument]
    164: while (i-- > 0)   in free_notes_attrs()
    166: &notes_attrs->attrs[i]);   in free_notes_attrs()
    174: unsigned int notes, loaded, i;   in add_notes_attrs() [local]
    184: for (i = 0; i < info->hdr->e_shnum; i++)   in add_notes_attrs()
    [all …]

kallsyms.c
    115: unsigned int i, nsrc, ndst, strtab_size = 0;   in layout_symtab() [local]
    127: for (ndst = i = 0; i < nsrc; i++) {   in layout_symtab()
    128: if (i == 0 || is_livepatch_module(mod) ||   in layout_symtab()
    129: is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum,   in layout_symtab()
    131: strtab_size += strlen(&info->strtab[src[i].st_name]) + 1;   in layout_symtab()
    168: unsigned int i, ndst;   in add_kallsyms() [local]
    197: for (ndst = i = 0; i < rcu_dereference(mod->kallsyms)->num_symtab; i++) {   in add_kallsyms()
    198: rcu_dereference(mod->kallsyms)->typetab[i] = elf_type(src + i, info);   in add_kallsyms()
    199: if (i == 0 || is_livepatch_module(mod) ||   in add_kallsyms()
    200: is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum,   in add_kallsyms()
    [all …]

/kernel/debug/kdb/

kdb_bp.c
    176: int i;   in kdb_bp_install() [local]
    178: for (i = 0; i < KDB_MAXBPT; i++) {   in kdb_bp_install()
    179: kdb_bp_t *bp = &kdb_breakpoints[i];   in kdb_bp_install()
    183: __func__, i, bp->bp_enabled);   in kdb_bp_install()
    207: int i;   in kdb_bp_remove() [local]
    209: for (i = KDB_MAXBPT - 1; i >= 0; i--) {   in kdb_bp_remove()
    210: kdb_bp_t *bp = &kdb_breakpoints[i];   in kdb_bp_remove()
    214: __func__, i, bp->bp_enabled);   in kdb_bp_remove()
    238: static void kdb_printbp(kdb_bp_t *bp, int i)   in kdb_printbp() [argument]
    241: kdb_printf("BP #%d at ", i);   in kdb_printbp()
    [all …]

/kernel/power/

energy_model.c
     70: int i;   in em_debug_create_pd() [local]
     83: for (i = 0; i < dev->em_pd->nr_perf_states; i++)   in em_debug_create_pd()
     84: em_debug_create_ps(&dev->em_pd->table[i], d);   in em_debug_create_pd()
    112: int i, ret;   in em_create_perf_table() [local]
    120: for (i = 0, freq = 0; i < nr_states; i++, freq++) {   in em_create_perf_table()
    153: table[i].power = power;   in em_create_perf_table()
    154: table[i].frequency = prev_freq = freq;   in em_create_perf_table()
    159: for (i = nr_states - 1; i >= 0; i--) {   in em_create_perf_table()
    163: ret = cb->get_cost(dev, table[i].frequency, &cost);   in em_create_perf_table()
    170: power_res = table[i].power;   in em_create_perf_table()
    [all …]

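em_create_perf_table() fills the state table in ascending frequency order, then walks it from the highest state down (line 159) to attach a per-state cost; in the matches that cost can also come from a driver's `get_cost` callback (line 163). The sketch below is an assumption-heavy illustration: it uses the classic energy-model definition, power scaled by max_freq/freq, and made-up state data:

```c
#include <stdio.h>

struct perf_state {
	unsigned long frequency;	/* kHz, ascending */
	unsigned long power;		/* mW */
	unsigned long cost;
};

int main(void)
{
	struct perf_state table[] = {
		{  500000,  100, 0 },
		{ 1000000,  300, 0 },
		{ 2000000, 1000, 0 },
	};
	int nr_states = 3;
	unsigned long max_freq = table[nr_states - 1].frequency;

	/* Descending walk, as in "for (i = nr_states - 1; i >= 0; i--)";
	 * the cost formula here is an assumption, not copied from the file. */
	for (int i = nr_states - 1; i >= 0; i--)
		table[i].cost = max_freq / table[i].frequency * table[i].power;

	for (int i = 0; i < nr_states; i++)
		printf("%lu kHz: power %lu mW, cost %lu\n",
		       table[i].frequency, table[i].power, table[i].cost);
	return 0;
}
```
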
/kernel/bpf/

devmap.c
     94: int i;   in dev_map_create_hash() [local]
     99: for (i = 0; i < entries; i++)   in dev_map_create_hash()
    100: INIT_HLIST_HEAD(&hash[i]);   in dev_map_create_hash()
    186: int i;   in dev_map_free() [local]
    209: for (i = 0; i < dtab->n_buckets; i++) {   in dev_map_free()
    214: head = dev_map_index_hash(dtab, i);   in dev_map_free()
    227: for (i = 0; i < dtab->map.max_entries; i++) {   in dev_map_free()
    230: dev = rcu_dereference_raw(dtab->netdev_map[i]);   in dev_map_free()
    288: int i = 0;   in dev_map_hash_get_next_key() [local]
    307: i = idx & (dtab->n_buckets - 1);   in dev_map_hash_get_next_key()
    [all …]

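Two idioms recur in the devmap.c matches: initializing every bucket of a freshly allocated hash table (lines 99-100), and reducing a key's index to a bucket with `& (n_buckets - 1)` (line 307), which presumes a power-of-two bucket count. A small userspace sketch of both, with the hlist type reduced to a bare head pointer:

```c
#include <stdio.h>
#include <stdlib.h>

struct hlist_head {
	void *first;
};

static struct hlist_head *create_hash(int entries)
{
	struct hlist_head *hash = malloc(entries * sizeof(*hash));

	if (hash)
		for (int i = 0; i < entries; i++)	/* mirrors lines 99-100 */
			hash[i].first = NULL;
	return hash;
}

int main(void)
{
	int n_buckets = 16;			/* must be a power of two */
	struct hlist_head *hash = create_hash(n_buckets);
	unsigned int idx = 0x1234abcd;

	if (!hash)
		return 1;
	/* Bucket selection as on line 307: a mask instead of a modulo. */
	printf("key 0x%x -> bucket %u\n", idx, idx & (n_buckets - 1));
	free(hash);
	return 0;
}
```
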