/kernel/
rcutorture.c
    306  int i;                                in rcu_torture_cb() local
    314  i = rp->rtort_pipe_count;             in rcu_torture_cb()
    315  if (i > RCU_TORTURE_PIPE_LEN)         in rcu_torture_cb()
    316  i = RCU_TORTURE_PIPE_LEN;             in rcu_torture_cb()
    317  atomic_inc(&rcu_torture_wcount[i]);   in rcu_torture_cb()
    347  int i;                                in rcu_sync_torture_deferred_free() local
    354  i = rp->rtort_pipe_count;             in rcu_sync_torture_deferred_free()
    355  if (i > RCU_TORTURE_PIPE_LEN)         in rcu_sync_torture_deferred_free()
    356  i = RCU_TORTURE_PIPE_LEN;             in rcu_sync_torture_deferred_free()
    357  atomic_inc(&rcu_torture_wcount[i]);   in rcu_sync_torture_deferred_free()
    [all …]

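The rcutorture hits show a clamp-then-count idiom: a pipeline stage read from the callback is capped at the last bucket before indexing an atomic counter array, so out-of-range values all land in one overflow slot. A minimal userspace sketch of the same idiom using C11 atomics (PIPE_LEN, wcount and count_stage are illustrative names, not the kernel's):

    #include <stdatomic.h>

    #define PIPE_LEN 10

    /* One counter per pipeline stage, plus a final overflow bucket. */
    static atomic_int wcount[PIPE_LEN + 1];

    static void count_stage(int stage)
    {
        if (stage > PIPE_LEN)          /* clamp: anything late lands in the last bucket */
            stage = PIPE_LEN;
        atomic_fetch_add(&wcount[stage], 1);
    }

The clamp keeps a misbehaving producer from indexing past the array while still recording that something overran the pipeline.
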
latencytop.c
     57  int i;                                           in account_global_scheduler_latency() local
     66  for (i = 0; i < MAXLR; i++) {                    in account_global_scheduler_latency()
     70  if (!latency_record[i].backtrace[0]) {           in account_global_scheduler_latency()
     71  if (firstnonnull > i)                            in account_global_scheduler_latency()
     72  firstnonnull = i;                                in account_global_scheduler_latency()
     78  if (latency_record[i].backtrace[q] != record) {  in account_global_scheduler_latency()
     88  latency_record[i].count++;                       in account_global_scheduler_latency()
     89  latency_record[i].time += lat->time;             in account_global_scheduler_latency()
     90  if (lat->time > latency_record[i].max)           in account_global_scheduler_latency()
     91  latency_record[i].max = lat->time;               in account_global_scheduler_latency()
    [all …]

module.c
    128  unsigned int i;                                           in find_sec() local
    130  for (i = 1; i < hdr->e_shnum; i++)                        in find_sec()
    132  if ((sechdrs[i].sh_flags & SHF_ALLOC)                     in find_sec()
    133  && strcmp(secstrings+sechdrs[i].sh_name, name) == 0)      in find_sec()
    134  return i;                                                 in find_sec()
    207  unsigned int i, j;                                        in each_symbol_in_section() local
    210  for (i = 0; i < arr[j].stop - arr[j].start; i++)          in each_symbol_in_section()
    211  if (fn(&arr[j], owner, i, data))                          in each_symbol_in_section()
    374  static int split_block(unsigned int i, unsigned short size)  in split_block() argument
    390  memmove(&pcpu_size[i+1], &pcpu_size[i],                   in split_block()
    [all …]

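find_sec() walks the ELF section header table, skipping the reserved null section at index 0 and matching allocated sections by name through the section-string table. A self-contained sketch of that scan over a mapped ELF image, using the standard <elf.h> types (the find_section() wrapper name is hypothetical):

    #include <elf.h>
    #include <string.h>

    /* Return the index of the named SHF_ALLOC section, or 0 if absent. */
    static unsigned int find_section(const Elf64_Ehdr *hdr, const char *name)
    {
        const Elf64_Shdr *sechdrs =
            (const Elf64_Shdr *)((const char *)hdr + hdr->e_shoff);
        const char *secstrings =
            (const char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
        unsigned int i;

        for (i = 1; i < hdr->e_shnum; i++)      /* entry 0 is the null section */
            if ((sechdrs[i].sh_flags & SHF_ALLOC)
                && strcmp(secstrings + sechdrs[i].sh_name, name) == 0)
                return i;
        return 0;
    }

Returning 0 doubles as "not found" because a real section can never have that index.
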
kprobes.c
    159  int i;                                         in __get_insn_slot() local
    160  for (i = 0; i < INSNS_PER_PAGE; i++) {         in __get_insn_slot()
    161  if (kip->slot_used[i] == SLOT_CLEAN) {         in __get_insn_slot()
    162  kip->slot_used[i] = SLOT_USED;                 in __get_insn_slot()
    164  return kip->insns + (i * MAX_INSN_SIZE);       in __get_insn_slot()
    249  int i;                                         in collect_garbage_slots() local
    253  for (i = 0; i < INSNS_PER_PAGE; i++) {         in collect_garbage_slots()
    254  if (kip->slot_used[i] == SLOT_DIRTY &&         in collect_garbage_slots()
    255  collect_one_slot(kip, i))                      in collect_garbage_slots()
    272  int i = (slot - kip->insns) / MAX_INSN_SIZE;   in free_insn_slot() local
    [all …]

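Together, __get_insn_slot() and free_insn_slot() form a fixed-size slot allocator: one state byte per slot (clean/used/dirty), with a slot's index recovered from its address by division, exactly as the hit at line 272 shows. A minimal sketch of the scheme (NSLOTS, SLOT_SIZE and the function names are illustrative):

    #include <stddef.h>

    enum slot_state { SLOT_CLEAN, SLOT_USED, SLOT_DIRTY };

    #define NSLOTS    16
    #define SLOT_SIZE 64

    static unsigned char pool[NSLOTS * SLOT_SIZE];
    static enum slot_state slot_used[NSLOTS];

    static void *get_slot(void)
    {
        for (int i = 0; i < NSLOTS; i++) {
            if (slot_used[i] == SLOT_CLEAN) {
                slot_used[i] = SLOT_USED;
                return pool + (size_t)i * SLOT_SIZE;
            }
        }
        return NULL;                       /* pool exhausted */
    }

    static void put_slot(void *slot)
    {
        /* Recover the index from the address, as free_insn_slot() does. */
        int i = (int)(((unsigned char *)slot - pool) / SLOT_SIZE);
        slot_used[i] = SLOT_DIRTY;         /* reclaimed later by a GC pass */
    }

Marking freed slots dirty rather than clean defers reuse to a separate garbage-collection pass, mirroring collect_garbage_slots() above.
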
kgdb.c
    529  int i = 4;                                     in int_to_threadref() local
    532  while (i--)                                    in int_to_threadref()
    623  int i;                                         in kgdb_activate_sw_breakpoints() local
    625  for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {   in kgdb_activate_sw_breakpoints()
    626  if (kgdb_break[i].state != BP_SET)             in kgdb_activate_sw_breakpoints()
    629  addr = kgdb_break[i].bpt_addr;                 in kgdb_activate_sw_breakpoints()
    631  kgdb_break[i].saved_instr);                    in kgdb_activate_sw_breakpoints()
    636  kgdb_break[i].state = BP_ACTIVE;               in kgdb_activate_sw_breakpoints()
    645  int i;                                         in kgdb_set_sw_break() local
    650  for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {   in kgdb_set_sw_break()
    [all …]

capability.c
    184  unsigned i;                                 in SYSCALL_DEFINE2() local
    186  for (i = 0; i < tocopy; i++) {              in SYSCALL_DEFINE2()
    187  kdata[i].effective = pE.cap[i];             in SYSCALL_DEFINE2()
    188  kdata[i].permitted = pP.cap[i];             in SYSCALL_DEFINE2()
    189  kdata[i].inheritable = pI.cap[i];           in SYSCALL_DEFINE2()
    241  unsigned i, tocopy;                         in SYSCALL_DEFINE2() local
    262  for (i = 0; i < tocopy; i++) {              in SYSCALL_DEFINE2()
    263  effective.cap[i] = kdata[i].effective;      in SYSCALL_DEFINE2()
    264  permitted.cap[i] = kdata[i].permitted;      in SYSCALL_DEFINE2()
    265  inheritable.cap[i] = kdata[i].inheritable;  in SYSCALL_DEFINE2()
    [all …]

auditfilter.c
    137  int i;                                        in audit_free_rule() local
    143  for (i = 0; i < e->rule.field_count; i++) {   in audit_free_rule()
    144  struct audit_field *f = &e->rule.fields[i];   in audit_free_rule()
    323  int i;                                        in audit_match_class_bits() local
    326  for (i = 0; i < AUDIT_BITMASK_SIZE; i++)      in audit_match_class_bits()
    327  if (mask[i] & classes[class][i])              in audit_match_class_bits()
    364  int i, err;                                   in audit_to_entry_common() local
    399  for (i = 0; i < AUDIT_BITMASK_SIZE; i++)      in audit_to_entry_common()
    400  entry->rule.mask[i] = rule->mask[i];          in audit_to_entry_common()
    402  for (i = 0; i < AUDIT_SYSCALL_CLASSES; i++) { in audit_to_entry_common()
    [all …]

panic.c
     61  long i;                                    in panic() local
    109  for (i = 0; i < panic_timeout*1000; ) {    in panic()
    111  i += panic_blink(i);                       in panic()
    113  i++;                                       in panic()
    133  for (i = 0;;) {                            in panic()
    135  i += panic_blink(i);                       in panic()
    137  i++;                                       in panic()
    187  int i;                                     in print_tainted() local
    190  for (i = 0; i < ARRAY_SIZE(tnts); i++) {   in print_tainted()
    191  const struct tnt *t = &tnts[i];            in print_tainted()
    [all …]

sched_cpupri.c
    156  int i;                                        in cpupri_init() local
    160  for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {  in cpupri_init()
    161  struct cpupri_vec *vec = &cp->pri_to_cpu[i];  in cpupri_init()
    171  for_each_possible_cpu(i)                      in cpupri_init()
    172  cp->cpu_to_pri[i] = CPUPRI_INVALID;           in cpupri_init()
    176  for (i--; i >= 0; i--)                        in cpupri_init()
    177  free_cpumask_var(cp->pri_to_cpu[i].mask);     in cpupri_init()
    187  int i;                                        in cpupri_cleanup() local
    189  for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)    in cpupri_cleanup()
    190  free_cpumask_var(cp->pri_to_cpu[i].mask);     in cpupri_cleanup()

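The cpupri_init() hits at 176-177 show the kernel's standard unwind idiom: when an allocation fails partway through a loop, a backwards loop starting at the failed index releases only what was actually set up. A generic sketch of that partial-initialization rollback, with plain malloc/free standing in for alloc_cpumask_var/free_cpumask_var and an illustrative priority count:

    #include <stdlib.h>

    #define NR_PRIORITIES 8    /* illustrative, not CPUPRI_NR_PRIORITIES */

    static int init_vectors(void *vec[NR_PRIORITIES])
    {
        int i;

        for (i = 0; i < NR_PRIORITIES; i++) {
            vec[i] = malloc(64);
            if (!vec[i])
                goto cleanup;
        }
        return 0;

    cleanup:
        /* i names the first slot that failed; free everything before it. */
        for (i--; i >= 0; i--)
            free(vec[i]);
        return -1;
    }

The "for (i--; i >= 0; i--)" shape guarantees the failed slot itself is never freed, which matters when the allocator leaves it in an indeterminate state.
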
marker.c
    128  int i;                                               in marker_probe_cb() local
    142  for (i = 0; multi[i].func; i++) {                    in marker_probe_cb()
    144  multi[i].func(multi[i].probe_private, call_private,  in marker_probe_cb()
    182  int i;                                               in marker_probe_cb_noarg() local
    196  for (i = 0; multi[i].func; i++)                      in marker_probe_cb_noarg()
    197  multi[i].func(multi[i].probe_private, call_private,  in marker_probe_cb_noarg()
    215  int i;                                               in debug_print_probes() local
    225  for (i = 0; entry->multi[i].func; i++)               in debug_print_probes()
    226  printk(KERN_DEBUG "Multi probe %d : %p %p\n", i,     in debug_print_probes()
    227  entry->multi[i].func,                                in debug_print_probes()
    [all …]

printk.c
    302  unsigned i, j, limit, count;                           in do_syslog() local
    331  i = 0;                                                 in do_syslog()
    333  while (!error && (log_start != log_end) && i < len) {  in do_syslog()
    339  i++;                                                   in do_syslog()
    345  error = i;                                             in do_syslog()
    376  for (i = 0; i < count && !error; i++) {                in do_syslog()
    377  j = limit-1-i;                                         in do_syslog()
    382  error = __put_user(c,&buf[count-1-i]);                 in do_syslog()
    389  error = i;                                             in do_syslog()
    390  if (i != count) {                                      in do_syslog()
    [all …]

params.c
     42  unsigned int i;                                              in parameq() local
     43  for (i = 0; dash2underscore(input[i]) == paramname[i]; i++)  in parameq()
     44  if (input[i] == '\0')                                        in parameq()
     55  unsigned int i;                                              in parse_one() local
     58  for (i = 0; i < num_params; i++) {                           in parse_one()
     59  if (parameq(param, params[i].name)) {                        in parse_one()
     61  params[i].set);                                              in parse_one()
     62  return params[i].set(val, &params[i]);                       in parse_one()
     79  unsigned int i, equals = 0;                                  in next_arg() local
     89  for (i = 0; args[i]; i++) {                                  in next_arg()
    [all …]

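parameq() compares a command-line token against a parameter name while treating '-' and '_' as interchangeable, so "my-param" and "my_param" both match. A self-contained version reconstructed from the hits above, with dash2underscore() written out the obvious way:

    /* Treat dashes in the input as underscores for comparison. */
    static char dash2underscore(char c)
    {
        if (c == '-')
            return '_';
        return c;
    }

    static int parameq(const char *input, const char *paramname)
    {
        unsigned int i;

        for (i = 0; dash2underscore(input[i]) == paramname[i]; i++)
            if (input[i] == '\0')
                return 1;      /* matched all the way to the terminator */
        return 0;
    }

Note the loop condition does the matching and the body only detects the shared terminator, a compact idiom worth reading twice.
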
cgroup.c
    203  int i;                                                         in css_set_hash() local
    207  for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)                      in css_set_hash()
    208  tmp += (unsigned long)css[i];                                  in css_set_hash()
    257  int i;                                                         in __put_css_set() local
    274  for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {                    in __put_css_set()
    275  struct cgroup *cgrp = rcu_dereference(cg->subsys[i]->cgroup);  in __put_css_set()
    323  int i;                                                         in find_existing_css_set() local
    331  for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {                    in find_existing_css_set()
    332  if (root->subsys_bits & (1UL << i)) {                          in find_existing_css_set()
    336  template[i] = cgrp->subsys[i];                                 in find_existing_css_set()
    [all …]

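css_set_hash() folds the array of subsystem-state pointers into a single key by summing the pointer values. A sketch of that pointer-sum hash; the subsystem count and the final reduction to a bucket index are illustrative assumptions, not the kernel's exact scheme:

    #define SUBSYS_COUNT 8     /* illustrative */
    #define HASH_BUCKETS 128   /* illustrative */

    /* Hash an array of subsystem-state pointers by accumulating
     * their addresses; identical arrays always hash alike. */
    static unsigned long css_array_hash(void *css[SUBSYS_COUNT])
    {
        unsigned long tmp = 0;

        for (int i = 0; i < SUBSYS_COUNT; i++)
            tmp += (unsigned long)css[i];
        return tmp % HASH_BUCKETS;
    }
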
lockdep.c
    142  int i;                                                    in lock_point() local
    144  for (i = 0; i < LOCKSTAT_POINTS; i++) {                   in lock_point()
    145  if (points[i] == 0) {                                     in lock_point()
    146  points[i] = ip;                                           in lock_point()
    149  if (points[i] == ip)                                      in lock_point()
    153  return i;                                                 in lock_point()
    179  int cpu, i;                                               in lock_stats() local
    186  for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)  in lock_stats()
    187  stats.contention_point[i] += pcs->contention_point[i];    in lock_stats()
    189  for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)  in lock_stats()
    [all …]

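lock_point() is a find-or-insert over a small fixed array: scan for the instruction pointer, claim the first empty slot if it is new, and return the index either way; callers can detect a full table by an index equal to the array size. A sketch reconstructed from the visible lines:

    #define LOCKSTAT_POINTS 4

    /* Return the slot recording ip, claiming an empty one if needed.
     * Returns LOCKSTAT_POINTS when the table is full and ip is absent. */
    static int lock_point(unsigned long points[LOCKSTAT_POINTS],
                          unsigned long ip)
    {
        int i;

        for (i = 0; i < LOCKSTAT_POINTS; i++) {
            if (points[i] == 0) {      /* empty slot: record a new point */
                points[i] = ip;
                break;
            }
            if (points[i] == ip)       /* already recorded */
                break;
        }
        return i;
    }

Because empty slots are always packed at the front, the first zero encountered proves ip is not present further on.
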
kexec.c
    123  unsigned long i;                                         in do_kimage_alloc() local
    169  for (i = 0; i < nr_segments; i++) {                      in do_kimage_alloc()
    172  mstart = image->segment[i].mem;                          in do_kimage_alloc()
    173  mend = mstart + image->segment[i].memsz;                 in do_kimage_alloc()
    186  for (i = 0; i < nr_segments; i++) {                      in do_kimage_alloc()
    190  mstart = image->segment[i].mem;                          in do_kimage_alloc()
    191  mend = mstart + image->segment[i].memsz;                 in do_kimage_alloc()
    192  for (j = 0; j < i; j++) {                                in do_kimage_alloc()
    208  for (i = 0; i < nr_segments; i++) {                      in do_kimage_alloc()
    209  if (image->segment[i].bufsz > image->segment[i].memsz)   in do_kimage_alloc()
    [all …]

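The middle do_kimage_alloc() loop validates that no two segment destination ranges overlap, comparing each segment against all earlier ones (the "for (j = 0; j < i; j++)" hit), the standard O(n²) pairwise test. A sketch of that check over half-open [mem, mem + memsz) ranges; the struct and function names are mine:

    #include <stdbool.h>
    #include <stddef.h>

    struct segment { unsigned long mem, memsz; };

    /* Half-open destination ranges must be pairwise disjoint. */
    static bool segments_overlap(const struct segment *seg, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            unsigned long mstart = seg[i].mem;
            unsigned long mend = mstart + seg[i].memsz;

            for (size_t j = 0; j < i; j++) {
                unsigned long pstart = seg[j].mem;
                unsigned long pend = pstart + seg[j].memsz;

                if (mend > pstart && mstart < pend)
                    return true;    /* ranges intersect */
            }
        }
        return false;
    }

Restricting the inner loop to j < i checks each unordered pair exactly once.
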
rtmutex-tester.c
     58  int i, id, ret = -EINVAL;                       in handle_op() local
     71  for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {     in handle_op()
     72  if (td->mutexes[i] == 4) {                      in handle_op()
     73  rt_mutex_unlock(&mutexes[i]);                   in handle_op()
     74  td->mutexes[i] = 0;                             in handle_op()
    369  int i;                                          in sysfs_test_status() local
    383  for (i = MAX_RT_TEST_MUTEXES - 1; i >= 0; i--)  in sysfs_test_status()
    384  curr += sprintf(curr, "%d", td->mutexes[i]);    in sysfs_test_status()
    415  int ret, i;                                     in init_rttest() local
    419  for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)       in init_rttest()
    [all …]

uid16.c
    138  int i;                                         in groups16_to_user() local
    141  for (i = 0; i < group_info->ngroups; i++) {    in groups16_to_user()
    142  group = high2lowgid(GROUP_AT(group_info, i));  in groups16_to_user()
    143  if (put_user(group, grouplist+i))              in groups16_to_user()
    153  int i;                                         in groups16_from_user() local
    156  for (i = 0; i < group_info->ngroups; i++) {    in groups16_from_user()
    157  if (get_user(group, grouplist+i))              in groups16_from_user()
    159  GROUP_AT(group_info, i) = low2highgid(group);  in groups16_from_user()
    168  int i;                                         in SYSCALL_DEFINE2() local
    173  i = cred->group_info->ngroups;                 in SYSCALL_DEFINE2()
    [all …]

pid.c
    127  int i, offset, max_scan, pid, last = pid_ns->last_pid;  in alloc_pidmap() local
    136  for (i = 0; i <= max_scan; ++i) {             in alloc_pidmap()
    168  (i != max_scan || pid < last ||               in alloc_pidmap()
    228  int i;                                        in free_pid() local
    232  for (i = 0; i <= pid->level; i++)             in free_pid()
    233  hlist_del_rcu(&pid->numbers[i].pid_chain);    in free_pid()
    236  for (i = 0; i <= pid->level; i++)             in free_pid()
    237  free_pidmap(pid->numbers + i);                in free_pid()
    246  int i, nr;                                    in alloc_pid() local
    255  for (i = ns->level; i >= 0; i--) {            in alloc_pid()
    [all …]

profile.c
    275  int i, j, cpu;                                                  in profile_flip_buffers() local
    283  for (i = 0; i < NR_PROFILE_HIT; ++i) {                          in profile_flip_buffers()
    284  if (!hits[i].hits) {                                            in profile_flip_buffers()
    285  if (hits[i].pc)                                                 in profile_flip_buffers()
    286  hits[i].pc = 0;                                                 in profile_flip_buffers()
    289  atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);             in profile_flip_buffers()
    290  hits[i].hits = hits[i].pc = 0;                                  in profile_flip_buffers()
    298  int i, cpu;                                                     in profile_discard_flip_buffers() local
    301  i = per_cpu(cpu_profile_flip, get_cpu());                       in profile_discard_flip_buffers()
    305  struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];   in profile_discard_flip_buffers()
    [all …]

rcutree.c
     797  int i;                                    in __rcu_offline_cpu() local
     847  for (i = 0; i < RCU_NEXT_SIZE; i++)       in __rcu_offline_cpu()
     848  rdp->nxttail[i] = &rdp->nxtlist;          in __rcu_offline_cpu()
    1321  int i;                                    in rcu_init_percpu_data() local
    1338  for (i = 0; i < RCU_NEXT_SIZE; i++)       in rcu_init_percpu_data()
    1339  rdp->nxttail[i] = &rdp->nxtlist;          in rcu_init_percpu_data()
    1423  int i;                                    in rcu_init_levelspread() local
    1425  for (i = NUM_RCU_LVLS - 1; i >= 0; i--)   in rcu_init_levelspread()
    1426  rsp->levelspread[i] = CONFIG_RCU_FANOUT;  in rcu_init_levelspread()
    1433  int i;                                    in rcu_init_levelspread() local
    [all …]

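The repeated "rdp->nxttail[i] = &rdp->nxtlist" hits reset a segmented callback list: nxtlist is a singly linked list and nxttail[] holds pointer-to-pointer tails marking where each grace-period segment ends, so aiming every tail at the head declares all segments empty. A sketch of that representation; the struct names and the enqueue helper are illustrative, not rcutree's API:

    #include <stddef.h>

    #define NEXT_SIZE 4

    struct callback { struct callback *next; };

    struct cblist {
        struct callback *head;               /* nxtlist in the kernel */
        struct callback **tails[NEXT_SIZE];  /* nxttail: end of each segment */
    };

    /* An empty list: every segment ends right at the head pointer. */
    static void cblist_init(struct cblist *l)
    {
        l->head = NULL;
        for (int i = 0; i < NEXT_SIZE; i++)
            l->tails[i] = &l->head;
    }

    /* Append onto the final segment through its tail pointer;
     * no traversal is ever needed to find the end. */
    static void cblist_enqueue(struct cblist *l, struct callback *cb)
    {
        cb->next = NULL;
        *l->tails[NEXT_SIZE - 1] = cb;
        l->tails[NEXT_SIZE - 1] = &cb->next;
    }
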
relay.c
    136  unsigned int i, j, n_pages;                                 in relay_alloc_buf() local
    145  for (i = 0; i < n_pages; i++) {                             in relay_alloc_buf()
    146  buf->page_array[i] = alloc_page(GFP_KERNEL);                in relay_alloc_buf()
    147  if (unlikely(!buf->page_array[i]))                          in relay_alloc_buf()
    149  set_page_private(buf->page_array[i], (unsigned long)buf);   in relay_alloc_buf()
    160  for (j = 0; j < i; j++)                                     in relay_alloc_buf()
    215  unsigned int i;                                             in relay_destroy_buf() local
    219  for (i = 0; i < buf->page_count; i++)                       in relay_destroy_buf()
    220  __free_page(buf->page_array[i]);                            in relay_destroy_buf()
    357  size_t i;                                                   in __relay_reset() local
    [all …]

/kernel/irq/
autoprobe.c
     36  int i;                                     in probe_irq_on() local
     47  for_each_irq_desc_reverse(i, desc) {       in probe_irq_on()
     61  desc->chip->set_type(i, IRQ_TYPE_PROBE);   in probe_irq_on()
     62  desc->chip->startup(i);                    in probe_irq_on()
     75  for_each_irq_desc_reverse(i, desc) {       in probe_irq_on()
     79  if (desc->chip->startup(i))                in probe_irq_on()
     93  for_each_irq_desc(i, desc) {               in probe_irq_on()
    101  desc->chip->shutdown(i);                   in probe_irq_on()
    103  if (i < 32)                                in probe_irq_on()
    104  mask |= 1 << i;                            in probe_irq_on()
    [all …]

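The tail of probe_irq_on() accumulates its return value: every IRQ line still asserted after the probe window sets its bit, and only the first 32 lines are representable (the "if (i < 32)" hit). A sketch of that bitmask accumulation; the helper and its inputs are hypothetical:

    /* Collect IRQ lines still asserted into a 32-bit probe mask.
     * Lines at or above 32 are probed but cannot be reported. */
    static unsigned long build_probe_mask(const int *pending, int nr_irqs)
    {
        unsigned long mask = 0;

        for (int i = 0; i < nr_irqs; i++) {
            if (!pending[i])
                continue;
            if (i < 32)            /* only the low 32 lines fit in the mask */
                mask |= 1UL << i;
        }
        return mask;
    }
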
handle.c
    147  int i;                                                   in early_irq_init() local
    154  for (i = 0; i < legacy_count; i++) {                     in early_irq_init()
    155  desc[i].irq = i;                                         in early_irq_init()
    156  desc[i].kstat_irqs = kstat_irqs_legacy[i];               in early_irq_init()
    157  lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);  in early_irq_init()
    159  irq_desc_ptrs[i] = desc + i;                             in early_irq_init()
    162  for (i = legacy_count; i < NR_IRQS; i++)                 in early_irq_init()
    163  irq_desc_ptrs[i] = NULL;                                 in early_irq_init()
    234  int i;                                                   in early_irq_init() local
    241  for (i = 0; i < count; i++)                              in early_irq_init()
    [all …]

/kernel/trace/
trace_stack.c
     44  int i;                                                     in check_stack() local
     73  i = 0;                                                     in check_stack()
     85  while (i < max_stack_trace.nr_entries) {                   in check_stack()
     88  stack_dump_index[i] = this_size;                           in check_stack()
     91  for (; p < top && i < max_stack_trace.nr_entries; p++) {   in check_stack()
     92  if (*p == stack_dump_trace[i]) {                           in check_stack()
     93  this_size = stack_dump_index[i++] =                        in check_stack()
    102  i++;                                                       in check_stack()
    191  long i;                                                    in t_next() local
    196  i = 0;                                                     in t_next()
    [all …]

trace_functions_graph.c
     79  int i;                                                     in print_graph_cpu() local
     98  for (i = 0; i < log10_all - log10_this; i++) {             in print_graph_cpu()
    115  int i;                                                     in print_graph_proc() local
    134  for (i = 0; i < spaces / 2; i++) {                         in print_graph_proc()
    145  for (i = 0; i < spaces - (spaces / 2); i++) {              in print_graph_proc()
    286  int i;                                                     in print_graph_duration() local
    311  for (i = len; i < 7; i++) {                                in print_graph_duration()
    350  int i;                                                     in print_graph_entry_leaf() local
    371  for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {   in print_graph_entry_leaf()
    392  int i;                                                     in print_graph_entry_nested() local
    [all …]