/kernel/

D | range.c |
    65  int i, j;  in subtract_range() local
    70  for (j = 0; j < az; j++) {  in subtract_range()
    71          if (!range[j].end)  in subtract_range()
    74          if (start <= range[j].start && end >= range[j].end) {  in subtract_range()
    75                  range[j].start = 0;  in subtract_range()
    76                  range[j].end = 0;  in subtract_range()
    80          if (start <= range[j].start && end < range[j].end &&  in subtract_range()
    81              range[j].start < end) {  in subtract_range()
    82                  range[j].start = end;  in subtract_range()
    87          if (start > range[j].start && end >= range[j].end &&  in subtract_range()
    [all …]

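subtract_range() punches the hole [start, end) out of an array of ranges, where a zeroed entry marks an unused slot. Below is a minimal user-space sketch of the three cases visible in the excerpt (entry fully covered, head trimmed, tail trimmed); the struct layout and function name are illustrative, not the kernel's:

    #include <stdint.h>

    struct range { uint64_t start, end; };

    /* Punch [start, end) out of each entry; a zeroed entry means "unused".
     * Covers the three cases shown above. (The kernel version also splits
     * an entry that strictly contains [start, end) into two pieces.) */
    void subtract_range_sketch(struct range *range, int az,
                               uint64_t start, uint64_t end)
    {
        for (int j = 0; j < az; j++) {
            if (!range[j].end)
                continue;                   /* unused slot */
            if (start <= range[j].start && end >= range[j].end) {
                range[j].start = 0;         /* fully covered: drop it */
                range[j].end = 0;
            } else if (start <= range[j].start && end < range[j].end &&
                       range[j].start < end) {
                range[j].start = end;       /* trim the head */
            } else if (start > range[j].start && end >= range[j].end &&
                       range[j].end > start) {
                range[j].end = start;       /* trim the tail */
            }
        }
    }
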
D | profile.c |
   246  int i, j, cpu;  in profile_flip_buffers() local
   249  j = per_cpu(cpu_profile_flip, get_cpu());  in profile_flip_buffers()
   253  struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];  in profile_flip_buffers()
   285  int i, j, cpu;  in do_profile_hits() local
   304  for (j = 0; j < PROFILE_GRPSZ; ++j) {  in do_profile_hits()
   305          if (hits[i + j].pc == pc) {  in do_profile_hits()
   306                  hits[i + j].hits += nr_hits;  in do_profile_hits()
   308          } else if (!hits[i + j].hits) {  in do_profile_hits()
   309                  hits[i + j].pc = pc;  in do_profile_hits()
   310                  hits[i + j].hits = nr_hits;  in do_profile_hits()

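do_profile_hits() batches PC samples in a small per-CPU open-addressed queue before touching the global profile buffer. A user-space sketch of the probe loop above, assuming a simplified slot layout (GRPSZ, struct hit, and account_hit are illustrative stand-ins for PROFILE_GRPSZ and the kernel's types):

    #include <stddef.h>

    #define GRPSZ 4                     /* stand-in for PROFILE_GRPSZ */

    struct hit { unsigned long pc, hits; };

    /* Probe one group of hash slots: bump the entry already holding pc,
     * or claim the first free slot (hits == 0). Returns 0 when the
     * group is full. */
    int account_hit(struct hit *hits, size_t i, unsigned long pc,
                    unsigned long nr_hits)
    {
        for (size_t j = 0; j < GRPSZ; j++) {
            if (hits[i + j].pc == pc) {
                hits[i + j].hits += nr_hits;
                return 1;
            } else if (!hits[i + j].hits) {
                hits[i + j].pc = pc;
                hits[i + j].hits = nr_hits;
                return 1;
            }
        }
        return 0;
    }

When the group is full the kernel instead flushes the queued hits into the global buffer; the sketch just reports failure.
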
D | audit_tree.c |
   225  int i, j;  in untag_chunk() local
   275  for (i = j = 0; j <= size; i++, j++) {  in untag_chunk()
   277          if (&chunk->owners[j] == p) {  in untag_chunk()
   282          s = chunk->owners[j].owner;  in untag_chunk()
   284          new->owners[i].index = chunk->owners[j].index - j + i;  in untag_chunk()
   288          list_replace_init(&chunk->owners[j].list, &new->owners[i].list);  in untag_chunk()

D | compat.c |
   893  int i, j;  in compat_get_bitmap() local
   909  for (j = 0; j < sizeof(m)/sizeof(um); j++) {  in compat_get_bitmap()
   924          m |= (long)um << (j * BITS_PER_COMPAT_LONG);  in compat_get_bitmap()
   935  int i, j;  in compat_put_bitmap() local
   951  for (j = 0; j < sizeof(m)/sizeof(um); j++) {  in compat_put_bitmap()

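compat_get_bitmap() widens a bitmap handed in by a 32-bit task: each native long is assembled from sizeof(m)/sizeof(um) compat-sized words shifted into place, two per long on a 64-bit kernel. A self-contained sketch of that gathering step; the kernel additionally copies each 32-bit half in from user space and masks trailing bits:

    #include <stdint.h>

    #define BITS_PER_COMPAT_LONG 32

    /* Assemble one 64-bit word from consecutive 32-bit "compat" words:
     * two iterations on a 64-bit build, each half shifted into place. */
    uint64_t gather_compat_word(const uint32_t *um)
    {
        uint64_t m = 0;

        for (unsigned int j = 0; j < sizeof(m) / sizeof(*um); j++)
            m |= (uint64_t)um[j] << (j * BITS_PER_COMPAT_LONG);
        return m;
    }

compat_put_bitmap() is the mirror image, carving each native long back into compat-sized pieces.
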
D | kexec_core.c |
   184  unsigned long j;  in sanity_check_segment_list() local
   188  for (j = 0; j < i; j++) {  in sanity_check_segment_list()
   191          pstart = image->segment[j].mem;  in sanity_check_segment_list()
   192          pend = pstart + image->segment[j].memsz;  in sanity_check_segment_list()

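The loop above rejects a kexec segment that intersects any segment accepted before it: two half-open [start, end) intervals overlap iff each one starts before the other ends. A sketch of the test (struct seg and the function name are illustrative):

    #include <stdbool.h>

    struct seg { unsigned long mem, memsz; };

    /* Does segment i intersect any earlier segment j < i? */
    bool overlaps_earlier(const struct seg *seg, unsigned long i)
    {
        unsigned long mstart = seg[i].mem;
        unsigned long mend   = mstart + seg[i].memsz;

        for (unsigned long j = 0; j < i; j++) {
            unsigned long pstart = seg[j].mem;
            unsigned long pend   = pstart + seg[j].memsz;

            if (mstart < pend && mend > pstart)
                return true;        /* intervals intersect */
        }
        return false;
    }
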
D | kexec_file.c |
   553  int ret = 0, i, j, zero_buf_sz, sha_region_sz;  in kexec_calculate_store_digests() local
   596  for (j = i = 0; i < image->nr_segments; i++) {  in kexec_calculate_store_digests()
   631          sha_regions[j].start = ksegment->mem;  in kexec_calculate_store_digests()
   632          sha_regions[j].len = ksegment->memsz;  in kexec_calculate_store_digests()
   633          j++;  in kexec_calculate_store_digests()

D | cpuset.c |
   638  int i, j, k;            /* indices for partition finding loops */  in generate_sched_domains() local
   712  for (j = 0; j < csn; j++) {  in generate_sched_domains()
   713          struct cpuset *b = csa[j];  in generate_sched_domains()
   768  for (j = i; j < csn; j++) {  in generate_sched_domains()
   769          struct cpuset *b = csa[j];  in generate_sched_domains()

D | auditfilter.c |
   300  int j;  in audit_to_entry_common() local
   301  for (j = 0; j < AUDIT_BITMASK_SIZE; j++)  in audit_to_entry_common()
   302          entry->rule.mask[j] |= class[j];  in audit_to_entry_common()

D | relay.c |
   126  unsigned int i, j, n_pages;  in relay_alloc_buf() local
   150  for (j = 0; j < i; j++)  in relay_alloc_buf()
   151          __free_page(buf->page_array[j]);  in relay_alloc_buf()

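Lines 150-151 show the standard partial-allocation unwind: when the i-th allocation fails, free exactly the 0..i-1 items already obtained, then fail the whole call. The same shape in user-space C, with malloc/free standing in for alloc_page()/__free_page():

    #include <stdlib.h>

    /* Allocate n_pages buffers of the given size; on failure, release
     * only what was actually allocated and return NULL. */
    void **alloc_array(unsigned int n_pages, size_t size)
    {
        void **pages = calloc(n_pages, sizeof(*pages));
        unsigned int i, j;

        if (!pages)
            return NULL;
        for (i = 0; i < n_pages; i++) {
            pages[i] = malloc(size);
            if (!pages[i])
                goto depopulate;
        }
        return pages;

    depopulate:
        for (j = 0; j < i; j++)     /* free the 0..i-1 successes */
            free(pages[j]);
        free(pages);
        return NULL;
    }
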
D | tracepoint.c |
   201  int j = 0;  in func_remove() local
   210  new[j++] = old[i];  in func_remove()

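func_remove() drops one callback from a tracepoint's function array by copying every other entry, with the write index j trailing the read index i once the victim has been skipped. Extracted as a sketch (signature and names are illustrative):

    #include <stddef.h>

    /* Copy old[] into new[], skipping the entry to remove.
     * Returns the new element count. */
    size_t remove_entry(void **new, void *const *old, size_t n,
                        const void *victim)
    {
        size_t j = 0;

        for (size_t i = 0; i < n; i++)
            if (old[i] != victim)
                new[j++] = old[i];
        return j;
    }
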
/kernel/time/

D | timer.c |
   153  static unsigned long round_jiffies_common(unsigned long j, int cpu,  in round_jiffies_common() argument
   157  unsigned long original = j;  in round_jiffies_common()
   167  j += cpu * 3;  in round_jiffies_common()
   169  rem = j % HZ;  in round_jiffies_common()
   179  j = j - rem;  in round_jiffies_common()
   181  j = j - rem + HZ;  in round_jiffies_common()
   184  j -= cpu * 3;  in round_jiffies_common()
   190  return time_is_after_jiffies(j) ? j : original;  in round_jiffies_common()
   213  unsigned long __round_jiffies(unsigned long j, int cpu)  in __round_jiffies() argument
   215  return round_jiffies_common(j, cpu, false);  in __round_jiffies()
   [all …]

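round_jiffies_common() rounds a timer expiry to a whole second so that wakeups across the system batch together. It first skews the value by cpu * 3 ticks so all CPUs don't round to the same tick, rounds down only when already within HZ/4 of a second boundary (unless forced up), undoes the skew, and never returns an expiry in the past. A user-space model of the arithmetic; the HZ value is assumed, and the plain j > now comparison stands in for the kernel's wraparound-safe time_is_after_jiffies():

    #define HZ 250                      /* assumed tick rate */

    unsigned long round_ticks(unsigned long j, int cpu,
                              unsigned long now, int force_up)
    {
        unsigned long original = j;
        unsigned long rem;

        j += cpu * 3;                   /* de-synchronize CPUs */
        rem = j % HZ;
        if (rem < HZ / 4 && !force_up)
            j = j - rem;                /* round down to the second */
        else
            j = j - rem + HZ;           /* round up to the next second */
        j -= cpu * 3;                   /* back to the shared timeline */
        return j > now ? j : original;  /* never move into the past */
    }
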
D | time.c |
   254  unsigned int jiffies_to_msecs(const unsigned long j)  in jiffies_to_msecs() argument
   257  return (MSEC_PER_SEC / HZ) * j;  in jiffies_to_msecs()
   259  return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);  in jiffies_to_msecs()
   262  return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>  in jiffies_to_msecs()
   265  return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);  in jiffies_to_msecs()
   271  unsigned int jiffies_to_usecs(const unsigned long j)  in jiffies_to_usecs() argument
   280  return (USEC_PER_SEC / HZ) * j;  in jiffies_to_usecs()
   283  return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;  in jiffies_to_usecs()
   285  return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;  in jiffies_to_usecs()

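jiffies_to_msecs() selects its conversion at compile time: an exact multiply when HZ divides 1000 evenly, a round-up divide when 1000 divides HZ, and a multiply/shift or DIV_ROUND_UP fallback otherwise. Rounding is always upward, so a nonzero tick count never reports zero milliseconds. A compile-time sketch of that selection, with branch conditions simplified from the kernel's and the overflow-avoiding multiply/shift path folded into a plain round-up division:

    #define HZ 250
    #define MSEC_PER_SEC 1000

    unsigned int ticks_to_msecs(unsigned long j)
    {
    #if MSEC_PER_SEC % HZ == 0          /* each tick is a whole number of ms */
        return (MSEC_PER_SEC / HZ) * j;
    #elif HZ > MSEC_PER_SEC && HZ % MSEC_PER_SEC == 0
        return (j + (HZ / MSEC_PER_SEC) - 1) / (HZ / MSEC_PER_SEC);
    #else                               /* generic round-up; may overflow for
                                           huge j, which the kernel's
                                           multiply/shift variant avoids */
        return (j * MSEC_PER_SEC + HZ - 1) / HZ;
    #endif
    }

With HZ = 250 the first branch applies and each tick reports as 4 ms; with HZ = 2000 the second branch rounds every partial tick up to a full millisecond.
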
/kernel/irq/

D | proc.c |
   448  int i = *(loff_t *) v, j;  in show_interrupts() local
   460  for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)  in show_interrupts()
   461          j *= 10;  in show_interrupts()
   464  for_each_online_cpu(j)  in show_interrupts()
   465          seq_printf(p, "CPU%-8d", j);  in show_interrupts()
   475  for_each_online_cpu(j)  in show_interrupts()
   476          any_count |= kstat_irqs_cpu(i, j);  in show_interrupts()
   482  for_each_online_cpu(j)  in show_interrupts()
   483          seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));  in show_interrupts()

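Lines 460-461 size the IRQ-number column for /proc/interrupts: prec starts at three characters and grows by one for each extra decimal digit in nr_irqs, capped at ten. Extracted as a helper for illustration:

    /* prec = max(3, decimal digits in nr_irqs), capped at 10:
     * nr_irqs = 16 -> 3, nr_irqs = 1234 -> 4, nr_irqs = 123456 -> 6. */
    int irq_column_width(unsigned int nr_irqs)
    {
        int prec;
        unsigned int j;

        for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
            j *= 10;
        return prec;
    }
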
D | irqdesc.c |
   515  int i, j;  in irq_free_hwirqs() local
   517  for (i = from, j = cnt; j > 0; i++, j--) {  in irq_free_hwirqs()

/kernel/rcu/

D | tree.c |
  1190  unsigned long j = jiffies;  in record_gp_stall_check_time() local
  1193  rsp->gp_start = j;  in record_gp_stall_check_time()
  1196  WRITE_ONCE(rsp->jiffies_stall, j + j1);  in record_gp_stall_check_time()
  1197  rsp->jiffies_resched = j + j1 / 2;  in record_gp_stall_check_time()
  1207  unsigned long j;  in rcu_check_gp_kthread_starvation() local
  1209  j = jiffies;  in rcu_check_gp_kthread_starvation()
  1211  if (j - gpa > 2 * HZ)  in rcu_check_gp_kthread_starvation()
  1213          rsp->name, j - gpa,  in rcu_check_gp_kthread_starvation()
  1245  unsigned long j;  in print_other_cpu_stall() local
  1297  j = jiffies;  in print_other_cpu_stall()
  [all …]

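These stall detectors do tick arithmetic that must survive jiffies wrapping around: with unsigned longs, j - gpa is the elapsed tick count even if the counter wrapped in between, which is also what ULONG_CMP_GE() and time_after() encode. A minimal model:

    /* Elapsed-tick test that is correct across counter wraparound,
     * thanks to modular unsigned subtraction. */
    int ticks_elapsed_over(unsigned long now, unsigned long then,
                           unsigned long timeout)
    {
        return now - then > timeout;
    }

Line 1211's j - gpa > 2 * HZ is exactly this test with a two-second budget.
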
D | tiny_plugin.h |
   140  unsigned long j;  in check_cpu_stall() local
   146  j = jiffies;  in check_cpu_stall()
   148  if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {  in check_cpu_stall()
   155  } else if (ULONG_CMP_GE(j, js)) {  in check_cpu_stall()

D | tree_plugin.h |
  2606  unsigned long j;  in rcu_sysidle_enter() local
  2631  j = jiffies;  in rcu_sysidle_enter()
  2632  WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);  in rcu_sysidle_enter()
  2736  unsigned long j;  in rcu_sysidle_check_cpu() local
  2763  j = READ_ONCE(rdtp->dynticks_idle_jiffies);  in rcu_sysidle_check_cpu()
  2765  if (ULONG_CMP_LT(*maxj, j))  in rcu_sysidle_check_cpu()
  2766          *maxj = j;  in rcu_sysidle_check_cpu()
  2797  static void rcu_sysidle(unsigned long j)  in rcu_sysidle() argument
  2813  if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))  in rcu_sysidle()
  2824  if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))  in rcu_sysidle()

D | rcutorture.c |
   823  int j;  in rcu_torture_cbflood() local
   845  for (j = 0; j < cbflood_n_per_burst; j++) {  in rcu_torture_cbflood()
   846          cur_ops->call(&rhp[i * cbflood_n_per_burst + j],  in rcu_torture_cbflood()

/kernel/gcov/

D | gcc_3_4.c |
   204  unsigned int j;  in gcov_info_add() local
   207  for (j = 0; j < dest->counts[i].num; j++) {  in gcov_info_add()
   208          dest->counts[i].values[j] +=  in gcov_info_add()
   209                  source->counts[i].values[j];  in gcov_info_add()

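gcov_info_add() merges coverage data from two runs by adding counters pairwise within each counter group. A sketch assuming a simplified counts layout (the kernel's groups live per counter type inside struct gcov_info):

    struct counts {                     /* simplified stand-in layout */
        unsigned int num;
        unsigned long long *values;
    };

    /* Accumulate source's counters into dest, group by group. */
    void counts_add(struct counts *dest, const struct counts *source,
                    unsigned int n_groups)
    {
        unsigned int i, j;

        for (i = 0; i < n_groups; i++)
            for (j = 0; j < dest[i].num; j++)
                dest[i].values[j] += source[i].values[j];
    }
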
/kernel/sched/

D | core.c |
  6565  int group, j;  in build_sched_groups() local
  6573  for_each_cpu(j, span) {  in build_sched_groups()
  6574          if (get_group(j, sdd, NULL) != group)  in build_sched_groups()
  6577          cpumask_set_cpu(j, covered);  in build_sched_groups()
  6578          cpumask_set_cpu(j, sched_group_cpus(sg));  in build_sched_groups()
  6952  int i,j;  in sched_numa_warn() local
  6963  for (j = 0; j < nr_node_ids; j++)  in sched_numa_warn()
  6964          printk(KERN_CONT "%02d ", node_distance(i,j));  in sched_numa_warn()
  7042  int i, j, k;  in sched_init_numa() local
  7057  for (j = 0; j < nr_node_ids; j++) {  in sched_init_numa()
  [all …]

D | cpufreq_schedutil.c |
   338  unsigned int j;  in sugov_next_freq_shared() local
   340  for_each_cpu(j, policy->cpus) {  in sugov_next_freq_shared()
   341          struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);  in sugov_next_freq_shared()

/kernel/locking/

D | locktorture.c |
   786  int i, j;  in lock_torture_init() local
   937  for (i = 0, j = 0; i < cxt.nrealwriters_stress ||  in lock_torture_init()
   938              j < cxt.nrealreaders_stress; i++, j++) {  in lock_torture_init()
   949          if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))  in lock_torture_init()
   952          firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],  in lock_torture_init()
   953                  reader_tasks[j]);  in lock_torture_init()

D | lockdep.c |
  2027  int i, j;  in lookup_chain_cache() local
  2092  for (j = 0; j < chain->depth - 1; j++, i++) {  in lookup_chain_cache()
  2094          chain_hlocks[chain->base + j] = lock_id;  in lookup_chain_cache()
  2096          chain_hlocks[chain->base + j] = class - lock_classes;  in lookup_chain_cache()
  3981  int i, j;  in lockdep_reset_lock() local
  3989  for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {  in lockdep_reset_lock()
  3993          class = look_up_lock_class(lock, j);  in lockdep_reset_lock()
  4009  for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)  in lockdep_reset_lock()
  4010          match |= class == lock->class_cache[j];  in lockdep_reset_lock()

/kernel/power/

D | snapshot.c |
  1928  int j;  in pack_pfns() local
  1930  for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {  in pack_pfns()
  1931          buf[j] = memory_bm_next_pfn(bm);  in pack_pfns()
  1932          if (unlikely(buf[j] == BM_END_OF_MAP))  in pack_pfns()
  1935          page_key_read(buf + j);  in pack_pfns()
  2089  int j;  in unpack_orig_pfns() local
  2091  for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {  in unpack_orig_pfns()
  2092          if (unlikely(buf[j] == BM_END_OF_MAP))  in unpack_orig_pfns()
  2096          page_key_memorize(buf + j);  in unpack_orig_pfns()
  2098          if (memory_bm_pfn_present(bm, buf[j]))  in unpack_orig_pfns()
  [all …]

D | suspend.c |
   139  int j = 0;  in suspend_set_ops() local
   146  pm_states[i] = pm_labels[j++];  in suspend_set_ops()
   149          j++;  in suspend_set_ops()
   152  pm_states[PM_SUSPEND_FREEZE] = pm_labels[j];  in suspend_set_ops()