
Searched refs:i (Results 1 – 25 of 121) sorted by relevance


/kernel/
range.c 30 int i; in add_range_with_merge() local
36 for (i = 0; i < nr_range; i++) { in add_range_with_merge()
39 if (!range[i].end) in add_range_with_merge()
42 common_start = max(range[i].start, start); in add_range_with_merge()
43 common_end = min(range[i].end, end); in add_range_with_merge()
48 start = min(range[i].start, start); in add_range_with_merge()
49 end = max(range[i].end, end); in add_range_with_merge()
51 memmove(&range[i], &range[i + 1], in add_range_with_merge()
52 (nr_range - (i + 1)) * sizeof(range[i])); in add_range_with_merge()
56 i--; in add_range_with_merge()
[all …]
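
The add_range_with_merge() hits above show a recurring merge-and-compact idiom: when the new range overlaps an existing entry, the entry is absorbed into the new bounds, the array is compacted with memmove(), and i is decremented so the slot just shifted in is re-examined. A minimal userspace sketch of that idiom, assuming a simplified struct range rather than the kernel's exact definitions:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's struct range. */
struct range { unsigned long start, end; };

/* Merge [start, end] into the array, collapsing overlapping entries;
 * returns the new entry count. A sketch of the idiom only. */
static int add_range_with_merge(struct range *range, int nr_range,
                                unsigned long start, unsigned long end)
{
    int i;

    for (i = 0; i < nr_range; i++) {
        if (!range[i].end)
            continue;                       /* skip empty slots */
        unsigned long common_start = start > range[i].start ? start : range[i].start;
        unsigned long common_end = end < range[i].end ? end : range[i].end;
        if (common_start > common_end)
            continue;                       /* no overlap, leave it */
        /* Absorb the overlapping entry into the new range ... */
        start = range[i].start < start ? range[i].start : start;
        end = range[i].end > end ? range[i].end : end;
        /* ... and compact the array over the freed slot. */
        memmove(&range[i], &range[i + 1],
                (nr_range - (i + 1)) * sizeof(range[i]));
        nr_range--;
        i--;                                /* re-examine the slot just moved in */
    }
    range[nr_range].start = start;
    range[nr_range].end = end;
    return nr_range + 1;
}

int main(void)
{
    struct range r[8] = { {10, 20}, {30, 40} };
    int n = add_range_with_merge(r, 2, 15, 35); /* bridges both entries */
    for (int i = 0; i < n; i++)
        printf("[%lu, %lu]\n", r[i].start, r[i].end);
    return 0;
}

Bridging [15, 35] across the two seed entries collapses the array to the single range [10, 40].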
latencytop.c 95 int i; in account_global_scheduler_latency() local
104 for (i = 0; i < MAXLR; i++) { in account_global_scheduler_latency()
108 if (!latency_record[i].backtrace[0]) { in account_global_scheduler_latency()
109 if (firstnonnull > i) in account_global_scheduler_latency()
110 firstnonnull = i; in account_global_scheduler_latency()
116 if (latency_record[i].backtrace[q] != record) { in account_global_scheduler_latency()
126 latency_record[i].count++; in account_global_scheduler_latency()
127 latency_record[i].time += lat->time; in account_global_scheduler_latency()
128 if (lat->time > latency_record[i].max) in account_global_scheduler_latency()
129 latency_record[i].max = lat->time; in account_global_scheduler_latency()
[all …]
groups.c 19 int i; in groups_alloc() local
34 for (i = 0; i < nblocks; i++) { in groups_alloc()
39 group_info->blocks[i] = b; in groups_alloc()
45 while (--i >= 0) { in groups_alloc()
46 free_page((unsigned long)group_info->blocks[i]); in groups_alloc()
57 int i; in groups_free() local
58 for (i = 0; i < group_info->nblocks; i++) in groups_free()
59 free_page((unsigned long)group_info->blocks[i]); in groups_free()
71 int i; in groups_to_user() local
74 for (i = 0; i < count; i++) { in groups_to_user()
[all …]
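
groups_alloc() shows the kernel's standard unwind-on-failure idiom: if an allocation fails partway through the loop, walk i back down with while (--i >= 0) and free everything obtained so far (alloc_descs() and cpupri_init() further down use the same shape). A hedged sketch with malloc()/free() standing in for the kernel's page allocator:

#include <stdlib.h>

#define NBLOCKS 4
#define BLOCK_SIZE 4096

/* Allocate nblocks buffers, unwinding on partial failure.
 * Returns 0 on success, -1 with nothing leaked on failure. */
static int alloc_blocks(void *blocks[], int nblocks)
{
    int i;

    for (i = 0; i < nblocks; i++) {
        blocks[i] = malloc(BLOCK_SIZE);
        if (!blocks[i])
            goto out_undo;
    }
    return 0;

out_undo:
    /* i indexes the failed slot; free everything before it. */
    while (--i >= 0)
        free(blocks[i]);
    return -1;
}

int main(void)
{
    void *blocks[NBLOCKS];
    if (alloc_blocks(blocks, NBLOCKS) == 0) {
        for (int i = 0; i < NBLOCKS; i++)
            free(blocks[i]);
    }
    return 0;
}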
auditfilter.c 94 int i; in audit_free_rule() local
101 for (i = 0; i < erule->field_count; i++) in audit_free_rule()
102 audit_free_lsm_field(&erule->fields[i]); in audit_free_rule()
209 int i; in audit_match_class_bits() local
212 for (i = 0; i < AUDIT_BITMASK_SIZE; i++) in audit_match_class_bits()
213 if (mask[i] & classes[class][i]) in audit_match_class_bits()
250 int i, err; in audit_to_entry_common() local
287 for (i = 0; i < AUDIT_BITMASK_SIZE; i++) in audit_to_entry_common()
288 entry->rule.mask[i] = rule->mask[i]; in audit_to_entry_common()
290 for (i = 0; i < AUDIT_SYSCALL_CLASSES; i++) { in audit_to_entry_common()
[all …]
kexec.c 163 int result, i; in sanity_check_segment_list() local
180 for (i = 0; i < nr_segments; i++) { in sanity_check_segment_list()
183 mstart = image->segment[i].mem; in sanity_check_segment_list()
184 mend = mstart + image->segment[i].memsz; in sanity_check_segment_list()
197 for (i = 0; i < nr_segments; i++) { in sanity_check_segment_list()
201 mstart = image->segment[i].mem; in sanity_check_segment_list()
202 mend = mstart + image->segment[i].memsz; in sanity_check_segment_list()
203 for (j = 0; j < i; j++) { in sanity_check_segment_list()
219 for (i = 0; i < nr_segments; i++) { in sanity_check_segment_list()
220 if (image->segment[i].bufsz > image->segment[i].memsz) in sanity_check_segment_list()
[all …]
params.c 73 size_t i; in parameqn() local
75 for (i = 0; i < n; i++) { in parameqn()
76 if (dash2underscore(a[i]) != dash2underscore(b[i])) in parameqn()
106 unsigned int i; in parse_one() local
110 for (i = 0; i < num_params; i++) { in parse_one()
111 if (parameq(param, params[i].name)) { in parse_one()
112 if (params[i].level < min_level in parse_one()
113 || params[i].level > max_level) in parse_one()
117 !(params[i].ops->flags & KERNEL_PARAM_OPS_FL_NOARG)) in parse_one()
120 params[i].ops->set); in parse_one()
[all …]
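
parameqn() compares parameter names with '-' and '_' treated as interchangeable, so "log-buf-len" and "log_buf_len" match the same parameter. A small sketch of that comparison; dash2underscore() is reconstructed from the call site here, not copied from the kernel:

#include <stdio.h>
#include <string.h>

/* Map '-' to '_' so the two spellings compare equal. */
static char dash2underscore(char c)
{
    return c == '-' ? '_' : c;
}

/* Compare the first n characters, dash/underscore-insensitively. */
static int parameqn(const char *a, const char *b, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        if (dash2underscore(a[i]) != dash2underscore(b[i]))
            return 0;
    }
    return 1;
}

int main(void)
{
    printf("%d\n", parameqn("log-buf-len", "log_buf_len",
                            strlen("log_buf_len")));    /* prints 1 */
    return 0;
}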
capability.c 168 unsigned i; in SYSCALL_DEFINE2() local
170 for (i = 0; i < tocopy; i++) { in SYSCALL_DEFINE2()
171 kdata[i].effective = pE.cap[i]; in SYSCALL_DEFINE2()
172 kdata[i].permitted = pP.cap[i]; in SYSCALL_DEFINE2()
173 kdata[i].inheritable = pI.cap[i]; in SYSCALL_DEFINE2()
225 unsigned i, tocopy, copybytes; in SYSCALL_DEFINE2() local
249 for (i = 0; i < tocopy; i++) { in SYSCALL_DEFINE2()
250 effective.cap[i] = kdata[i].effective; in SYSCALL_DEFINE2()
251 permitted.cap[i] = kdata[i].permitted; in SYSCALL_DEFINE2()
252 inheritable.cap[i] = kdata[i].inheritable; in SYSCALL_DEFINE2()
[all …]
kprobes.c 156 int i; in __get_insn_slot() local
157 for (i = 0; i < slots_per_page(c); i++) { in __get_insn_slot()
158 if (kip->slot_used[i] == SLOT_CLEAN) { in __get_insn_slot()
159 kip->slot_used[i] = SLOT_USED; in __get_insn_slot()
161 slot = kip->insns + (i * c->insn_size); in __get_insn_slot()
233 int i; in collect_garbage_slots() local
237 for (i = 0; i < slots_per_page(c); i++) { in collect_garbage_slots()
238 if (kip->slot_used[i] == SLOT_DIRTY && in collect_garbage_slots()
239 collect_one_slot(kip, i)) in collect_garbage_slots()
422 int i; in get_optimized_kprobe() local
[all …]
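
__get_insn_slot() is a first-fit scan: find the first SLOT_CLEAN entry in the page's slot_used[] array, mark it SLOT_USED, and derive the slot's address from its index and the per-slot instruction size. A standalone sketch, with the page layout and sizes as simplified assumptions:

#include <stdio.h>
#include <stddef.h>

enum slot_state { SLOT_CLEAN, SLOT_USED, SLOT_DIRTY };

#define SLOTS_PER_PAGE 32
#define INSN_SIZE 16

struct insn_page {
    enum slot_state slot_used[SLOTS_PER_PAGE];
    unsigned char insns[SLOTS_PER_PAGE * INSN_SIZE];
};

/* First-fit: return the first clean slot, marking it used. */
static unsigned char *get_insn_slot(struct insn_page *kip)
{
    for (int i = 0; i < SLOTS_PER_PAGE; i++) {
        if (kip->slot_used[i] == SLOT_CLEAN) {
            kip->slot_used[i] = SLOT_USED;
            return kip->insns + (i * INSN_SIZE);
        }
    }
    return NULL;    /* page full */
}

int main(void)
{
    static struct insn_page page;   /* zero-initialized: all SLOT_CLEAN */
    unsigned char *s1 = get_insn_slot(&page);
    unsigned char *s2 = get_insn_slot(&page);
    printf("slot gap: %td bytes\n", (char *)s2 - (char *)s1);   /* 16 */
    return 0;
}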
module.c 224 unsigned int i; in find_sec() local
226 for (i = 1; i < info->hdr->e_shnum; i++) { in find_sec()
227 Elf_Shdr *shdr = &info->sechdrs[i]; in find_sec()
231 return i; in find_sec()
719 struct module *i = use->target; in module_unload_free() local
720 pr_debug("%s unusing %s\n", mod->name, i->name); in module_unload_free()
721 module_put(i); in module_unload_free()
1145 unsigned int i, num_versions; in check_version() local
1160 for (i = 0; i < num_versions; i++) { in check_version()
1161 if (strcmp(versions[i].name, symname) != 0) in check_version()
[all …]
panic.c 76 long i, i_next = 0; in panic() local
170 for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { in panic()
172 if (i >= i_next) { in panic()
173 i += panic_blink(state ^= 1); in panic()
174 i_next = i + 3600 / PANIC_BLINK_SPD; in panic()
205 for (i = 0; ; i += PANIC_TIMER_STEP) { in panic()
207 if (i >= i_next) { in panic()
208 i += panic_blink(state ^= 1); in panic()
209 i_next = i + 3600 / PANIC_BLINK_SPD; in panic()
269 int i; in print_tainted() local
[all …]
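
The panic() loops pace the LED-blink callback off a millisecond counter: i advances by PANIC_TIMER_STEP per iteration, and once it reaches i_next the blink hook fires and schedules the next toggle 3600 / PANIC_BLINK_SPD ms out. A sketch of just the pacing logic; the blink hook and the timeout value are stubs, and the kernel's mdelay() is elided:

#include <stdio.h>

#define PANIC_TIMER_STEP 100    /* ms advanced per iteration */
#define PANIC_BLINK_SPD 18      /* 3600 / 18 = 200 ms between toggles */

/* Stub blink hook: toggles state, returns extra ms consumed. */
static long panic_blink(int state)
{
    printf("led %s\n", state ? "on" : "off");
    return 0;
}

int main(void)
{
    long i, i_next = 0;
    int state = 0;
    long timeout_ms = 2000;     /* stand-in for panic_timeout * 1000 */

    for (i = 0; i < timeout_ms; i += PANIC_TIMER_STEP) {
        /* mdelay(PANIC_TIMER_STEP) would sit here in the kernel */
        if (i >= i_next) {
            i += panic_blink(state ^= 1);
            i_next = i + 3600 / PANIC_BLINK_SPD;
        }
    }
    return 0;
}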
profile.c 245 int i, j, cpu; in profile_flip_buffers() local
253 for (i = 0; i < NR_PROFILE_HIT; ++i) { in profile_flip_buffers()
254 if (!hits[i].hits) { in profile_flip_buffers()
255 if (hits[i].pc) in profile_flip_buffers()
256 hits[i].pc = 0; in profile_flip_buffers()
259 atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); in profile_flip_buffers()
260 hits[i].hits = hits[i].pc = 0; in profile_flip_buffers()
268 int i, cpu; in profile_discard_flip_buffers() local
271 i = per_cpu(cpu_profile_flip, get_cpu()); in profile_discard_flip_buffers()
275 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; in profile_discard_flip_buffers()
[all …]
/kernel/rcu/
rcutorture.c 310 int i; in rcu_torture_pipe_update_one() local
312 i = rp->rtort_pipe_count; in rcu_torture_pipe_update_one()
313 if (i > RCU_TORTURE_PIPE_LEN) in rcu_torture_pipe_update_one()
314 i = RCU_TORTURE_PIPE_LEN; in rcu_torture_pipe_update_one()
315 atomic_inc(&rcu_torture_wcount[i]); in rcu_torture_pipe_update_one()
780 int i; in rcu_torture_cbflood() local
804 for (i = 0; i < cbflood_n_burst; i++) { in rcu_torture_cbflood()
806 cur_ops->call(&rhp[i * cbflood_n_per_burst + j], in rcu_torture_cbflood()
861 int i; in rcu_torture_writer() local
917 i = old_rp->rtort_pipe_count; in rcu_torture_writer()
[all …]
/kernel/irq/
irqdesc.c 200 int i; in alloc_descs() local
202 for (i = 0; i < cnt; i++) { in alloc_descs()
203 desc = alloc_desc(start + i, node, owner); in alloc_descs()
207 irq_insert_desc(start + i, desc); in alloc_descs()
213 for (i--; i >= 0; i--) in alloc_descs()
214 free_desc(start + i); in alloc_descs()
232 int i, initcnt, node = first_online_node; in early_irq_init() local
250 for (i = 0; i < initcnt; i++) { in early_irq_init()
251 desc = alloc_desc(i, node, NULL); in early_irq_init()
252 set_bit(i, allocated_irqs); in early_irq_init()
[all …]
generic-chip.c 239 int i; in irq_gc_init_mask_cache() local
241 for (i = 0; i < gc->num_ct; i++) { in irq_gc_init_mask_cache()
243 mskptr = &ct[i].mask_cache_priv; in irq_gc_init_mask_cache()
244 mskreg = ct[i].regs.mask; in irq_gc_init_mask_cache()
246 ct[i].mask_cache = mskptr; in irq_gc_init_mask_cache()
271 int numchips, sz, i; in irq_alloc_domain_generic_chips() local
298 for (i = 0; i < numchips; i++) { in irq_alloc_domain_generic_chips()
300 dgc->gc[i] = gc = tmp; in irq_alloc_domain_generic_chips()
301 irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip, in irq_alloc_domain_generic_chips()
423 unsigned int i; in irq_setup_generic_chip() local
[all …]
autoprobe.c 35 int i; in probe_irq_on() local
46 for_each_irq_desc_reverse(i, desc) { in probe_irq_on()
69 for_each_irq_desc_reverse(i, desc) { in probe_irq_on()
87 for_each_irq_desc(i, desc) { in probe_irq_on()
96 if (i < 32) in probe_irq_on()
97 mask |= 1 << i; in probe_irq_on()
122 int i; in probe_irq_mask() local
124 for_each_irq_desc(i, desc) { in probe_irq_mask()
127 if (i < 16 && !(desc->istate & IRQS_WAITING)) in probe_irq_mask()
128 mask |= 1 << i; in probe_irq_mask()
[all …]
/kernel/gcov/
gcc_3_4.c 157 unsigned int i; in num_counter_active() local
160 for (i = 0; i < GCOV_COUNTERS; i++) { in num_counter_active()
161 if (counter_active(info, i)) in num_counter_active()
174 unsigned int i; in gcov_info_reset() local
176 for (i = 0; i < active; i++) { in gcov_info_reset()
177 memset(info->counts[i].values, 0, in gcov_info_reset()
178 info->counts[i].num * sizeof(gcov_type)); in gcov_info_reset()
203 unsigned int i; in gcov_info_add() local
206 for (i = 0; i < num_counter_active(dest); i++) { in gcov_info_add()
207 for (j = 0; j < dest->counts[i].num; j++) { in gcov_info_add()
[all …]
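
gcov_info_add() merges one coverage profile into another by summing the counter values element-wise. The inner accumulation, trimmed to a single counter array, with gcov_type as a stand-in typedef rather than the kernel's definition:

#include <stdio.h>

typedef long long gcov_type;    /* stand-in for the kernel's gcov_type */

/* dest[j] += src[j] for each counter value. */
static void counters_add(gcov_type *dest, const gcov_type *src,
                         unsigned int num)
{
    for (unsigned int j = 0; j < num; j++)
        dest[j] += src[j];
}

int main(void)
{
    gcov_type a[3] = {1, 2, 3}, b[3] = {10, 20, 30};
    counters_add(a, b, 3);
    printf("%lld %lld %lld\n", a[0], a[1], a[2]);   /* 11 22 33 */
    return 0;
}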
fs.c 96 loff_t i; in gcov_seq_start() local
99 for (i = 0; i < *pos; i++) { in gcov_seq_start()
160 int i = 0; in get_accumulated_info() local
165 info = gcov_info_dup(node->loaded_info[i++]); in get_accumulated_info()
168 for (; i < node->num_loaded; i++) in get_accumulated_info()
169 gcov_info_add(info, node->loaded_info[i]); in get_accumulated_info()
257 int i; in reset_node() local
261 for (i = 0; i < node->num_loaded; i++) in reset_node()
262 gcov_info_reset(node->loaded_info[i]); in reset_node()
371 int i; in add_links() local
[all …]
/kernel/debug/
debug_core.c 235 int i; in kgdb_flush_swbreak_addr() local
237 for (i = 0; i < VMACACHE_SIZE; i++) { in kgdb_flush_swbreak_addr()
238 if (!current->vmacache[i]) in kgdb_flush_swbreak_addr()
240 flush_cache_range(current->vmacache[i], in kgdb_flush_swbreak_addr()
256 int i; in dbg_activate_sw_breakpoints() local
258 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { in dbg_activate_sw_breakpoints()
259 if (kgdb_break[i].state != BP_SET) in dbg_activate_sw_breakpoints()
262 error = kgdb_arch_set_breakpoint(&kgdb_break[i]); in dbg_activate_sw_breakpoints()
266 kgdb_break[i].bpt_addr); in dbg_activate_sw_breakpoints()
270 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr); in dbg_activate_sw_breakpoints()
[all …]
gdbstub.c 62 int i; in gdbstub_read_wait() local
73 for (i = 0; kdb_poll_funcs[i] != NULL; i++) { in gdbstub_read_wait()
74 ret = kdb_poll_funcs[i](); in gdbstub_read_wait()
201 int i; in gdbstub_msg_write() local
220 for (i = 0; i < wcount; i++) in gdbstub_msg_write()
221 bufptr = hex_byte_pack(bufptr, s[i]); in gdbstub_msg_write()
343 int i; in pt_regs_to_gdb_regs() local
347 for (i = 0; i < DBG_MAX_REG_NUM; i++) { in pt_regs_to_gdb_regs()
348 dbg_get_reg(i, ptr + idx, regs); in pt_regs_to_gdb_regs()
349 idx += dbg_reg_def[i].size; in pt_regs_to_gdb_regs()
[all …]
/kernel/trace/
trace_stack.c 55 long i; in print_max_stack() local
62 for (i = 0; i < max_stack_trace.nr_entries; i++) { in print_max_stack()
63 if (stack_dump_trace[i] == ULONG_MAX) in print_max_stack()
65 if (i+1 == max_stack_trace.nr_entries || in print_max_stack()
66 stack_dump_trace[i+1] == ULONG_MAX) in print_max_stack()
67 size = stack_dump_index[i]; in print_max_stack()
69 size = stack_dump_index[i] - stack_dump_index[i+1]; in print_max_stack()
71 pr_emerg("%3ld) %8d %5d %pS\n", i, stack_dump_index[i], in print_max_stack()
72 size, (void *)stack_dump_trace[i]); in print_max_stack()
82 int i; in check_stack() local
[all …]
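
print_max_stack() recovers each frame's stack usage from a cumulative depth array: entry i's size is stack_dump_index[i] minus the next entry's depth, and the last entry (or the one before the ULONG_MAX terminator) takes its own depth outright. A sketch of that differencing over invented sample data:

#include <stdio.h>
#include <limits.h>

int main(void)
{
    /* Cumulative stack depth at each traced frame, deepest first;
     * the companion trace array is ULONG_MAX-terminated. */
    unsigned long stack_dump_index[] = { 512, 384, 200, 64 };
    unsigned long stack_dump_trace[] = { 0xa0, 0xb0, 0xc0, 0xd0, ULONG_MAX };
    long nr_entries = 4;

    for (long i = 0; i < nr_entries; i++) {
        unsigned long size;

        if (stack_dump_trace[i] == ULONG_MAX)
            break;
        if (i + 1 == nr_entries || stack_dump_trace[i + 1] == ULONG_MAX)
            size = stack_dump_index[i];     /* last frame: own depth */
        else
            size = stack_dump_index[i] - stack_dump_index[i + 1];
        printf("%3ld) %8lu %5lu %#lx\n", i, stack_dump_index[i],
               size, stack_dump_trace[i]);
    }
    return 0;
}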
/kernel/debug/kdb/
kdb_bp.c 176 int i; in kdb_bp_install() local
178 for (i = 0; i < KDB_MAXBPT; i++) { in kdb_bp_install()
179 kdb_bp_t *bp = &kdb_breakpoints[i]; in kdb_bp_install()
183 __func__, i, bp->bp_enabled); in kdb_bp_install()
207 int i; in kdb_bp_remove() local
209 for (i = KDB_MAXBPT - 1; i >= 0; i--) { in kdb_bp_remove()
210 kdb_bp_t *bp = &kdb_breakpoints[i]; in kdb_bp_remove()
214 __func__, i, bp->bp_enabled); in kdb_bp_remove()
238 static void kdb_printbp(kdb_bp_t *bp, int i) in kdb_printbp() argument
241 kdb_printf("BP #%d at ", i); in kdb_printbp()
[all …]
kdb_main.c 203 int i; in kdbgetenv() local
205 for (i = 0; i < __nenv; i++) { in kdbgetenv()
354 int i; in kdb_set() local
406 for (i = 0; i < __nenv; i++) { in kdb_set()
407 if (__env[i] in kdb_set()
408 && ((strncmp(__env[i], argv[1], varlen) == 0) in kdb_set()
409 && ((__env[i][varlen] == '\0') in kdb_set()
410 || (__env[i][varlen] == '=')))) { in kdb_set()
411 __env[i] = ep; in kdb_set()
419 for (i = 0; i < __nenv-1; i++) { in kdb_set()
[all …]
/kernel/locking/
locktorture.c 502 int i, n_stress; in __torture_print_stats() local
508 for (i = 0; i < n_stress; i++) { in __torture_print_stats()
509 if (statp[i].n_lock_fail) in __torture_print_stats()
511 sum += statp[i].n_lock_acquired; in __torture_print_stats()
512 if (max < statp[i].n_lock_fail) in __torture_print_stats()
513 max = statp[i].n_lock_fail; in __torture_print_stats()
514 if (min > statp[i].n_lock_fail) in __torture_print_stats()
515 min = statp[i].n_lock_fail; in __torture_print_stats()
600 int i; in lock_torture_cleanup() local
606 for (i = 0; i < cxt.nrealwriters_stress; i++) in lock_torture_cleanup()
[all …]
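
__torture_print_stats() reduces the per-thread counters in one pass, tracking the sum of acquisitions alongside the min and max failure counts. The same reduction in standalone form; the struct layout and initializing min to LONG_MAX are assumptions of this sketch:

#include <stdio.h>
#include <limits.h>

struct lock_stress_stats { long n_lock_fail, n_lock_acquired; };

int main(void)
{
    struct lock_stress_stats statp[] = {
        { 0, 120 }, { 3, 95 }, { 1, 110 },
    };
    int n_stress = 3;
    long sum = 0, max = 0, min = LONG_MAX;
    int fail = 0;

    for (int i = 0; i < n_stress; i++) {
        if (statp[i].n_lock_fail)
            fail = 1;                       /* any failure flags the run */
        sum += statp[i].n_lock_acquired;
        if (max < statp[i].n_lock_fail)
            max = statp[i].n_lock_fail;
        if (min > statp[i].n_lock_fail)
            min = statp[i].n_lock_fail;
    }
    printf("acquired=%ld fail=%d min=%ld max=%ld\n", sum, fail, min, max);
    return 0;
}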
/kernel/sched/
cpupri.c 210 int i; in cpupri_init() local
214 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { in cpupri_init()
215 struct cpupri_vec *vec = &cp->pri_to_cpu[i]; in cpupri_init()
226 for_each_possible_cpu(i) in cpupri_init()
227 cp->cpu_to_pri[i] = CPUPRI_INVALID; in cpupri_init()
232 for (i--; i >= 0; i--) in cpupri_init()
233 free_cpumask_var(cp->pri_to_cpu[i].mask); in cpupri_init()
243 int i; in cpupri_cleanup() local
246 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) in cpupri_cleanup()
247 free_cpumask_var(cp->pri_to_cpu[i].mask); in cpupri_cleanup()
/kernel/bpf/
verifier.c 247 int i; in print_verifier_state() local
249 for (i = 0; i < MAX_BPF_REG; i++) { in print_verifier_state()
250 t = env->cur_state.regs[i].type; in print_verifier_state()
253 verbose(" R%d=%s", i, reg_type_str[t]); in print_verifier_state()
255 verbose("%d", env->cur_state.regs[i].imm); in print_verifier_state()
259 env->cur_state.regs[i].map_ptr->key_size, in print_verifier_state()
260 env->cur_state.regs[i].map_ptr->value_size); in print_verifier_state()
262 for (i = 0; i < MAX_BPF_STACK; i++) { in print_verifier_state()
263 if (env->cur_state.stack[i].stype == STACK_SPILL) in print_verifier_state()
264 verbose(" fp%d=%s", -MAX_BPF_STACK + i, in print_verifier_state()
[all …]
