/kernel/trace/
D  tracing_map.c
     47  void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n)  in tracing_map_update_sum() argument
     49  atomic64_add(n, &elt->fields[i].sum);  in tracing_map_update_sum()
     64  u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)  in tracing_map_read_sum() argument
     66  return (u64)atomic64_read(&elt->fields[i].sum);  in tracing_map_read_sum()
    208  unsigned int i;  in tracing_map_array_clear() local
    213  for (i = 0; i < a->n_pages; i++)  in tracing_map_array_clear()
    214  memset(a->pages[i], 0, PAGE_SIZE);  in tracing_map_array_clear()
    219  unsigned int i;  in tracing_map_array_free() local
    227  for (i = 0; i < a->n_pages; i++) {  in tracing_map_array_free()
    228  if (!a->pages[i])  in tracing_map_array_free()
    [all …]
D  trace_stack.c
     45  long i;  in stack_trace_print() local
     52  for (i = 0; i < stack_trace_max.nr_entries; i++) {  in stack_trace_print()
     53  if (stack_dump_trace[i] == ULONG_MAX)  in stack_trace_print()
     55  if (i+1 == stack_trace_max.nr_entries ||  in stack_trace_print()
     56  stack_dump_trace[i+1] == ULONG_MAX)  in stack_trace_print()
     57  size = stack_trace_index[i];  in stack_trace_print()
     59  size = stack_trace_index[i] - stack_trace_index[i+1];  in stack_trace_print()
     61  pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i],  in stack_trace_print()
     62  size, (void *)stack_dump_trace[i]);  in stack_trace_print()
     80  int i, x;  in check_stack() local
    [all …]
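The stack_trace_print() hits above compute each frame's stack usage as the difference between adjacent depth indices, with the deepest entry using its index directly. A minimal userspace sketch of that computation; the sample data is made up for illustration and stands in for the kernel's stack_trace_max bookkeeping:

```c
#include <stdio.h>

int main(void)
{
	/* Stack depth recorded at each traced call site (sample values). */
	unsigned long stack_trace_index[] = { 4096, 3500, 2100, 800 };
	const char *stack_dump_trace[]    = { "d()", "c()", "b()", "a()" };
	unsigned long nr_entries = 4;
	unsigned long i, size;

	for (i = 0; i < nr_entries; i++) {
		/* Last entry: its index is the whole remaining usage. */
		if (i + 1 == nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i + 1];

		printf("%3lu) %8lu %5lu %s\n", i, stack_trace_index[i],
		       size, stack_dump_trace[i]);
	}
	return 0;
}
```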
D  trace_events_hist.c
     95  #define for_each_hist_field(i, hist_data) \  argument
     96  for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
     98  #define for_each_hist_val_field(i, hist_data) \  argument
     99  for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
    101  #define for_each_hist_key_field(i, hist_data) \  argument
    102  for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
    299  unsigned int i;  in hist_trigger_elt_comm_alloc() local
    301  for_each_hist_key_field(i, hist_data) {  in hist_trigger_elt_comm_alloc()
    302  key_field = hist_data->fields[i];  in hist_trigger_elt_comm_alloc()
    403  unsigned int i;  in destroy_hist_fields() local
    [all …]
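The for_each_hist_*_field() macros listed above encode one layout decision: value fields occupy indices [0, n_vals) of the fields array and key fields occupy [n_vals, n_fields). A minimal userspace sketch of that pattern, reusing the macros from the hits; the hist_data struct below is a stand-in, not the kernel's hist_trigger_data:

```c
#include <stdio.h>

struct hist_data {
	unsigned int n_vals;    /* number of value fields           */
	unsigned int n_fields;  /* total fields: values, then keys  */
	const char *fields[8];
};

#define for_each_hist_field(i, hist_data) \
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data) \
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data) \
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

int main(void)
{
	struct hist_data hd = { 2, 4, { "hitcount", "bytes", "pid", "comm" } };
	unsigned int i;

	for_each_hist_val_field(i, &hd)
		printf("val field %u: %s\n", i, hd.fields[i]);
	for_each_hist_key_field(i, &hd)
		printf("key field %u: %s\n", i, hd.fields[i]);
	return 0;
}
```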
/kernel/
D  range.c
     30  int i;  in add_range_with_merge() local
     36  for (i = 0; i < nr_range; i++) {  in add_range_with_merge()
     39  if (!range[i].end)  in add_range_with_merge()
     42  common_start = max(range[i].start, start);  in add_range_with_merge()
     43  common_end = min(range[i].end, end);  in add_range_with_merge()
     48  start = min(range[i].start, start);  in add_range_with_merge()
     49  end = max(range[i].end, end);  in add_range_with_merge()
     51  memmove(&range[i], &range[i + 1],  in add_range_with_merge()
     52  (nr_range - (i + 1)) * sizeof(range[i]));  in add_range_with_merge()
     56  i--;  in add_range_with_merge()
    [all …]
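The add_range_with_merge() hits above show the merge step: any existing range that overlaps the new one is folded into it (min of the starts, max of the ends), removed with memmove(), and the loop index is rewound so the widened range is re-checked against the remaining slots. A compilable userspace sketch of that logic, with the struct layout and the final append step simplified:

```c
#include <stdio.h>
#include <string.h>

struct range { unsigned long start, end; };

static int add_range_with_merge(struct range *range, int az, int nr_range,
				unsigned long start, unsigned long end)
{
	int i;

	if (start >= end)
		return nr_range;

	/* Fold every overlapping slot into the new range. */
	for (i = 0; i < nr_range; i++) {
		unsigned long common_start, common_end;

		if (!range[i].end)
			continue;

		common_start = (range[i].start > start) ? range[i].start : start;
		common_end   = (range[i].end < end) ? range[i].end : end;
		if (common_start > common_end)
			continue;

		/* Widen the new range, then drop slot i ... */
		start = (range[i].start < start) ? range[i].start : start;
		end   = (range[i].end > end) ? range[i].end : end;

		memmove(&range[i], &range[i + 1],
			(nr_range - (i + 1)) * sizeof(range[i]));
		range[nr_range - 1].start = 0;
		range[nr_range - 1].end = 0;
		nr_range--;
		i--;	/* ... and re-check this slot against the wider range. */
	}

	/* No more overlaps: append the (possibly widened) range. */
	if (nr_range < az) {
		range[nr_range].start = start;
		range[nr_range].end = end;
		nr_range++;
	}
	return nr_range;
}

int main(void)
{
	struct range r[8] = { { 0, 10 }, { 20, 30 } };
	int n = add_range_with_merge(r, 8, 2, 5, 25);	/* bridges both ranges */

	printf("%d range(s), first = %lu..%lu\n", n, r[0].start, r[0].end);
	return 0;
}
```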
D  kexec_file.c
    260  int ret = 0, i;  in SYSCALL_DEFINE5() local
    307  for (i = 0; i < image->nr_segments; i++) {  in SYSCALL_DEFINE5()
    310  ksegment = &image->segment[i];  in SYSCALL_DEFINE5()
    312  i, ksegment->buf, ksegment->bufsz, ksegment->mem,  in SYSCALL_DEFINE5()
    315  ret = kimage_load_segment(image, &image->segment[i]);  in SYSCALL_DEFINE5()
    506  int ret = 0, i, j, zero_buf_sz, sha_region_sz;  in kexec_calculate_store_digests() local
    547  for (j = i = 0; i < image->nr_segments; i++) {  in kexec_calculate_store_digests()
    550  ksegment = &image->segment[i];  in kexec_calculate_store_digests()
    622  int i, ret = 0, entry_sidx = -1;  in __kexec_load_purgatory() local
    660  for (i = 0; i < pi->ehdr->e_shnum; i++) {  in __kexec_load_purgatory()
    [all …]
D  latencytop.c
     95  int i;  in account_global_scheduler_latency() local
    104  for (i = 0; i < MAXLR; i++) {  in account_global_scheduler_latency()
    108  if (!latency_record[i].backtrace[0]) {  in account_global_scheduler_latency()
    109  if (firstnonnull > i)  in account_global_scheduler_latency()
    110  firstnonnull = i;  in account_global_scheduler_latency()
    116  if (latency_record[i].backtrace[q] != record) {  in account_global_scheduler_latency()
    126  latency_record[i].count++;  in account_global_scheduler_latency()
    127  latency_record[i].time += lat->time;  in account_global_scheduler_latency()
    128  if (lat->time > latency_record[i].max)  in account_global_scheduler_latency()
    129  latency_record[i].max = lat->time;  in account_global_scheduler_latency()
    [all …]
D  profile.c
    246  int i, j, cpu;  in profile_flip_buffers() local
    254  for (i = 0; i < NR_PROFILE_HIT; ++i) {  in profile_flip_buffers()
    255  if (!hits[i].hits) {  in profile_flip_buffers()
    256  if (hits[i].pc)  in profile_flip_buffers()
    257  hits[i].pc = 0;  in profile_flip_buffers()
    260  atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);  in profile_flip_buffers()
    261  hits[i].hits = hits[i].pc = 0;  in profile_flip_buffers()
    269  int i, cpu;  in profile_discard_flip_buffers() local
    272  i = per_cpu(cpu_profile_flip, get_cpu());  in profile_discard_flip_buffers()
    276  struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];  in profile_discard_flip_buffers()
    [all …]
D  auditfilter.c
     94  int i;  in audit_free_rule() local
    101  for (i = 0; i < erule->field_count; i++)  in audit_free_rule()
    102  audit_free_lsm_field(&erule->fields[i]);  in audit_free_rule()
    209  int i;  in audit_match_class_bits() local
    212  for (i = 0; i < AUDIT_BITMASK_SIZE; i++)  in audit_match_class_bits()
    213  if (mask[i] & classes[class][i])  in audit_match_class_bits()
    250  int i, err;  in audit_to_entry_common() local
    287  for (i = 0; i < AUDIT_BITMASK_SIZE; i++)  in audit_to_entry_common()
    288  entry->rule.mask[i] = rule->mask[i];  in audit_to_entry_common()
    290  for (i = 0; i < AUDIT_SYSCALL_CLASSES; i++) {  in audit_to_entry_common()
    [all …]
D  kprobes.c
    156  int i;  in __get_insn_slot() local
    157  for (i = 0; i < slots_per_page(c); i++) {  in __get_insn_slot()
    158  if (kip->slot_used[i] == SLOT_CLEAN) {  in __get_insn_slot()
    159  kip->slot_used[i] = SLOT_USED;  in __get_insn_slot()
    161  slot = kip->insns + (i * c->insn_size);  in __get_insn_slot()
    233  int i;  in collect_garbage_slots() local
    237  for (i = 0; i < slots_per_page(c); i++) {  in collect_garbage_slots()
    238  if (kip->slot_used[i] == SLOT_DIRTY &&  in collect_garbage_slots()
    239  collect_one_slot(kip, i))  in collect_garbage_slots()
    422  int i;  in get_optimized_kprobe() local
    [all …]
D  params.c
     97  size_t i;  in parameqn() local
     99  for (i = 0; i < n; i++) {  in parameqn()
    100  if (dash2underscore(a[i]) != dash2underscore(b[i]))  in parameqn()
    131  unsigned int i;  in parse_one() local
    135  for (i = 0; i < num_params; i++) {  in parse_one()
    136  if (parameq(param, params[i].name)) {  in parse_one()
    137  if (params[i].level < min_level  in parse_one()
    138  || params[i].level > max_level)  in parse_one()
    142  !(params[i].ops->flags & KERNEL_PARAM_OPS_FL_NOARG))  in parse_one()
    145  params[i].ops->set);  in parse_one()
    [all …]
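The parameqn() hits above rest on one rule: kernel parameter names treat '-' and '_' as interchangeable, so each byte is normalized with dash2underscore() before comparison. A standalone userspace sketch of that comparison, mirroring the helpers named in the hits rather than reproducing the kernel's full params.c:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static char dash2underscore(char c)
{
	return (c == '-') ? '_' : c;
}

/* Compare n bytes of two parameter names, dash/underscore-insensitively. */
static bool parameqn(const char *a, const char *b, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (dash2underscore(a[i]) != dash2underscore(b[i]))
			return false;
	}
	return true;
}

/* Including the terminating NUL keeps prefixes from matching. */
static bool parameq(const char *a, const char *b)
{
	return parameqn(a, b, strlen(a) + 1);
}

int main(void)
{
	printf("%d\n", parameq("log-buf-len", "log_buf_len"));	/* prints 1 */
	return 0;
}
```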
D  capability.c
    169  unsigned i;  in SYSCALL_DEFINE2() local
    171  for (i = 0; i < tocopy; i++) {  in SYSCALL_DEFINE2()
    172  kdata[i].effective = pE.cap[i];  in SYSCALL_DEFINE2()
    173  kdata[i].permitted = pP.cap[i];  in SYSCALL_DEFINE2()
    174  kdata[i].inheritable = pI.cap[i];  in SYSCALL_DEFINE2()
    226  unsigned i, tocopy, copybytes;  in SYSCALL_DEFINE2() local
    250  for (i = 0; i < tocopy; i++) {  in SYSCALL_DEFINE2()
    251  effective.cap[i] = kdata[i].effective;  in SYSCALL_DEFINE2()
    252  permitted.cap[i] = kdata[i].permitted;  in SYSCALL_DEFINE2()
    253  inheritable.cap[i] = kdata[i].inheritable;  in SYSCALL_DEFINE2()
    [all …]
D  groups.c
     44  int i;  in groups_to_user() local
     47  for (i = 0; i < count; i++) {  in groups_to_user()
     49  gid = from_kgid_munged(user_ns, group_info->gid[i]);  in groups_to_user()
     50  if (put_user(gid, grouplist+i))  in groups_to_user()
     61  int i;  in groups_from_user() local
     64  for (i = 0; i < count; i++) {  in groups_from_user()
     67  if (get_user(gid, grouplist+i))  in groups_from_user()
     74  group_info->gid[i] = kgid;  in groups_from_user()
    168  int i;  in SYSCALL_DEFINE2() local
    174  i = cred->group_info->ngroups;  in SYSCALL_DEFINE2()
    [all …]
/kernel/debug/
D  debug_core.c
    232  int i;  in kgdb_flush_swbreak_addr() local
    234  for (i = 0; i < VMACACHE_SIZE; i++) {  in kgdb_flush_swbreak_addr()
    235  if (!current->vmacache[i])  in kgdb_flush_swbreak_addr()
    237  flush_cache_range(current->vmacache[i],  in kgdb_flush_swbreak_addr()
    253  int i;  in dbg_activate_sw_breakpoints() local
    255  for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {  in dbg_activate_sw_breakpoints()
    256  if (kgdb_break[i].state != BP_SET)  in dbg_activate_sw_breakpoints()
    259  error = kgdb_arch_set_breakpoint(&kgdb_break[i]);  in dbg_activate_sw_breakpoints()
    263  kgdb_break[i].bpt_addr);  in dbg_activate_sw_breakpoints()
    267  kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);  in dbg_activate_sw_breakpoints()
    [all …]
D  gdbstub.c
     62  int i;  in gdbstub_read_wait() local
     73  for (i = 0; kdb_poll_funcs[i] != NULL; i++) {  in gdbstub_read_wait()
     74  ret = kdb_poll_funcs[i]();  in gdbstub_read_wait()
    201  int i;  in gdbstub_msg_write() local
    220  for (i = 0; i < wcount; i++)  in gdbstub_msg_write()
    221  bufptr = hex_byte_pack(bufptr, s[i]);  in gdbstub_msg_write()
    343  int i;  in pt_regs_to_gdb_regs() local
    347  for (i = 0; i < DBG_MAX_REG_NUM; i++) {  in pt_regs_to_gdb_regs()
    348  dbg_get_reg(i, ptr + idx, regs);  in pt_regs_to_gdb_regs()
    349  idx += dbg_reg_def[i].size;  in pt_regs_to_gdb_regs()
    [all …]
/kernel/rcu/
D  rcutorture.c
    178  unsigned int i = READ_ONCE(rcu_torture_writer_state);  in rcu_torture_writer_state_getname() local
    180  if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))  in rcu_torture_writer_state_getname()
    182  return rcu_torture_writer_state_names[i];  in rcu_torture_writer_state_getname()
    321  int i;  in rcu_torture_pipe_update_one() local
    323  i = rp->rtort_pipe_count;  in rcu_torture_pipe_update_one()
    324  if (i > RCU_TORTURE_PIPE_LEN)  in rcu_torture_pipe_update_one()
    325  i = RCU_TORTURE_PIPE_LEN;  in rcu_torture_pipe_update_one()
    326  atomic_inc(&rcu_torture_wcount[i]);  in rcu_torture_pipe_update_one()
    836  int i;  in rcu_torture_cbflood() local
    858  for (i = 0; i < cbflood_n_burst; i++) {  in rcu_torture_cbflood()
    [all …]
D  rcuperf.c
    351  int i = 0;  in rcu_perf_writer() local
    382  wdp = &wdpp[i];  in rcu_perf_writer()
    394  i_max = i;  in rcu_perf_writer()
    398  if (!done && i >= MIN_MEAS) {  in rcu_perf_writer()
    427  if (started && !alldone && i < MAX_MEAS - 1)  in rcu_perf_writer()
    428  i++;  in rcu_perf_writer()
    448  int i;  in rcu_perf_cleanup() local
    458  for (i = 0; i < nrealreaders; i++)  in rcu_perf_cleanup()
    460  reader_tasks[i]);  in rcu_perf_cleanup()
    465  for (i = 0; i < nrealwriters; i++) {  in rcu_perf_cleanup()
    [all …]
/kernel/gcov/
D  gcc_3_4.c
    157  unsigned int i;  in num_counter_active() local
    160  for (i = 0; i < GCOV_COUNTERS; i++) {  in num_counter_active()
    161  if (counter_active(info, i))  in num_counter_active()
    174  unsigned int i;  in gcov_info_reset() local
    176  for (i = 0; i < active; i++) {  in gcov_info_reset()
    177  memset(info->counts[i].values, 0,  in gcov_info_reset()
    178  info->counts[i].num * sizeof(gcov_type));  in gcov_info_reset()
    203  unsigned int i;  in gcov_info_add() local
    206  for (i = 0; i < num_counter_active(dest); i++) {  in gcov_info_add()
    207  for (j = 0; j < dest->counts[i].num; j++) {  in gcov_info_add()
    [all …]
D  fs.c
     96  loff_t i;  in gcov_seq_start() local
     99  for (i = 0; i < *pos; i++) {  in gcov_seq_start()
    160  int i = 0;  in get_accumulated_info() local
    165  info = gcov_info_dup(node->loaded_info[i++]);  in get_accumulated_info()
    168  for (; i < node->num_loaded; i++)  in get_accumulated_info()
    169  gcov_info_add(info, node->loaded_info[i]);  in get_accumulated_info()
    257  int i;  in reset_node() local
    261  for (i = 0; i < node->num_loaded; i++)  in reset_node()
    262  gcov_info_reset(node->loaded_info[i]);  in reset_node()
    371  int i;  in add_links() local
    [all …]
/kernel/debug/kdb/
D  kdb_bp.c
    176  int i;  in kdb_bp_install() local
    178  for (i = 0; i < KDB_MAXBPT; i++) {  in kdb_bp_install()
    179  kdb_bp_t *bp = &kdb_breakpoints[i];  in kdb_bp_install()
    183  __func__, i, bp->bp_enabled);  in kdb_bp_install()
    207  int i;  in kdb_bp_remove() local
    209  for (i = KDB_MAXBPT - 1; i >= 0; i--) {  in kdb_bp_remove()
    210  kdb_bp_t *bp = &kdb_breakpoints[i];  in kdb_bp_remove()
    214  __func__, i, bp->bp_enabled);  in kdb_bp_remove()
    238  static void kdb_printbp(kdb_bp_t *bp, int i)  in kdb_printbp() argument
    241  kdb_printf("BP #%d at ", i);  in kdb_printbp()
    [all …]
D  kdb_main.c
    231  int i;  in kdbgetenv() local
    233  for (i = 0; i < __nenv; i++) {  in kdbgetenv()
    382  int i;  in kdb_set() local
    434  for (i = 0; i < __nenv; i++) {  in kdb_set()
    435  if (__env[i]  in kdb_set()
    436  && ((strncmp(__env[i], argv[1], varlen) == 0)  in kdb_set()
    437  && ((__env[i][varlen] == '\0')  in kdb_set()
    438  || (__env[i][varlen] == '=')))) {  in kdb_set()
    439  __env[i] = ep;  in kdb_set()
    447  for (i = 0; i < __nenv-1; i++) {  in kdb_set()
    [all …]
/kernel/irq/
D  irqdesc.c
    425  int i;  in alloc_descs() local
    429  for (i = 0, mask = affinity; i < cnt; i++, mask++) {  in alloc_descs()
    438  for (i = 0; i < cnt; i++) {  in alloc_descs()
    444  desc = alloc_desc(start + i, node, flags, mask, owner);  in alloc_descs()
    447  irq_insert_desc(start + i, desc);  in alloc_descs()
    448  irq_sysfs_add(start + i, desc);  in alloc_descs()
    454  for (i--; i >= 0; i--)  in alloc_descs()
    455  free_desc(start + i);  in alloc_descs()
    469  int i, initcnt, node = first_online_node;  in early_irq_init() local
    487  for (i = 0; i < initcnt; i++) {  in early_irq_init()
    [all …]
/kernel/locking/
D  locktorture.c
    643  int i, n_stress;  in __torture_print_stats() local
    648  for (i = 0; i < n_stress; i++) {  in __torture_print_stats()
    649  if (statp[i].n_lock_fail)  in __torture_print_stats()
    651  sum += statp[i].n_lock_acquired;  in __torture_print_stats()
    652  if (max < statp[i].n_lock_fail)  in __torture_print_stats()
    653  max = statp[i].n_lock_fail;  in __torture_print_stats()
    654  if (min > statp[i].n_lock_fail)  in __torture_print_stats()
    655  min = statp[i].n_lock_fail;  in __torture_print_stats()
    740  int i;  in lock_torture_cleanup() local
    755  for (i = 0; i < cxt.nrealwriters_stress; i++)  in lock_torture_cleanup()
    [all …]
D  lockdep.c
    162  int i;  in lock_point() local
    164  for (i = 0; i < LOCKSTAT_POINTS; i++) {  in lock_point()
    165  if (points[i] == 0) {  in lock_point()
    166  points[i] = ip;  in lock_point()
    169  if (points[i] == ip)  in lock_point()
    173  return i;  in lock_point()
    206  int cpu, i;  in lock_stats() local
    213  for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)  in lock_stats()
    214  stats.contention_point[i] += pcs->contention_point[i];  in lock_stats()
    216  for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)  in lock_stats()
    [all …]
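The lock_point() hits above show a fixed-array recording scheme: the caller's instruction pointer is stored in the first empty slot of a small points[] array, or matched against a slot that already holds it, and the slot index is returned (LOCKSTAT_POINTS means the array is full). A userspace sketch under those assumptions, with the array size chosen arbitrarily:

```c
#include <stdio.h>

#define LOCKSTAT_POINTS 4

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;	/* first free slot: claim it */
			break;
		}
		if (points[i] == ip)	/* already recorded: reuse its slot */
			break;
	}
	return i;			/* LOCKSTAT_POINTS if the array is full */
}

int main(void)
{
	unsigned long points[LOCKSTAT_POINTS] = { 0 };
	int a = lock_point(points, 0x1000);	/* -> 0: claims the first slot  */
	int b = lock_point(points, 0x2000);	/* -> 1: claims the second slot */
	int c = lock_point(points, 0x1000);	/* -> 0: matches the first slot */

	printf("%d %d %d\n", a, b, c);		/* prints "0 1 0" */
	return 0;
}
```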
/kernel/sched/
D  cpupri.c
    210  int i;  in cpupri_init() local
    214  for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {  in cpupri_init()
    215  struct cpupri_vec *vec = &cp->pri_to_cpu[i];  in cpupri_init()
    226  for_each_possible_cpu(i)  in cpupri_init()
    227  cp->cpu_to_pri[i] = CPUPRI_INVALID;  in cpupri_init()
    232  for (i--; i >= 0; i--)  in cpupri_init()
    233  free_cpumask_var(cp->pri_to_cpu[i].mask);  in cpupri_init()
    243  int i;  in cpupri_cleanup() local
    246  for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)  in cpupri_cleanup()
    247  free_cpumask_var(cp->pri_to_cpu[i].mask);  in cpupri_cleanup()
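The cpupri_init() hits above (and the alloc_descs() hits in irqdesc.c) end with the kernel's usual unwind-on-failure idiom: if setting up slot i fails, `for (i--; i >= 0; i--)` releases only the slots that were successfully initialized, in reverse order. A small userspace sketch of the same idiom with placeholder names:

```c
#include <stdlib.h>

#define NR_SLOTS 4

struct table { void *slot[NR_SLOTS]; };

static int table_init(struct table *t)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++) {
		t->slot[i] = malloc(64);
		if (!t->slot[i])
			goto cleanup;
	}
	return 0;

cleanup:
	/* Free only what this loop managed to allocate, newest first. */
	for (i--; i >= 0; i--)
		free(t->slot[i]);
	return -1;
}

int main(void)
{
	struct table t;
	int i, err = table_init(&t);

	if (!err)
		for (i = 0; i < NR_SLOTS; i++)
			free(t.slot[i]);
	return err ? 1 : 0;
}
```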
/kernel/bpf/
D  verifier.c
    197  int i;  in print_verifier_state() local
    199  for (i = 0; i < MAX_BPF_REG; i++) {  in print_verifier_state()
    200  reg = &state->regs[i];  in print_verifier_state()
    204  verbose(" R%d=%s", i, reg_type_str[t]);  in print_verifier_state()
    226  for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {  in print_verifier_state()
    227  if (state->stack_slot_type[i] == STACK_SPILL)  in print_verifier_state()
    228  verbose(" fp%d=%s", -MAX_BPF_STACK + i,  in print_verifier_state()
    229  reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]);  in print_verifier_state()
    442  int i;  in init_reg_state() local
    444  for (i = 0; i < MAX_BPF_REG; i++) {  in init_reg_state()
    [all …]