/kernel/ |
D | range.c |
   67 int i, j; in subtract_range() local
   72 for (j = 0; j < az; j++) { in subtract_range()
   73 if (!range[j].end) in subtract_range()
   76 if (start <= range[j].start && end >= range[j].end) { in subtract_range()
   77 range[j].start = 0; in subtract_range()
   78 range[j].end = 0; in subtract_range()
   82 if (start <= range[j].start && end < range[j].end && in subtract_range()
   83 range[j].start < end) { in subtract_range()
   84 range[j].start = end; in subtract_range()
   89 if (start > range[j].start && end >= range[j].end && in subtract_range()
  [all …]
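
The subtract_range() excerpt above enumerates the interval-overlap cases one by one. A minimal userspace sketch (hypothetical names, not the kernel code): full cover empties the slot, a front overlap clips the start, a tail overlap clips the end; the split case, which needs a spare slot, is left out.

#include <stdio.h>

struct range { unsigned long start, end; };

static void subtract_one(struct range *r, unsigned long start, unsigned long end)
{
    if (!r->end)
        return;                          /* empty slot */
    if (start <= r->start && end >= r->end) {
        r->start = r->end = 0;           /* fully covered: drop it */
    } else if (start <= r->start && end < r->end && r->start < end) {
        r->start = end;                  /* front clipped */
    } else if (start > r->start && end >= r->end && r->end > start) {
        r->end = start;                  /* tail clipped */
    }
    /* the middle-split case needs a second slot and is omitted here */
}

int main(void)
{
    struct range r = { 10, 100 };

    subtract_one(&r, 0, 20);             /* clips the front */
    printf("[%lu, %lu)\n", r.start, r.end);  /* prints [20, 100) */
    return 0;
}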
|
D | profile.c |
  257 int i, j, cpu; in profile_flip_buffers() local
  260 j = per_cpu(cpu_profile_flip, get_cpu()); in profile_flip_buffers()
  264 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; in profile_flip_buffers()
  296 int i, j, cpu; in do_profile_hits() local
  315 for (j = 0; j < PROFILE_GRPSZ; ++j) { in do_profile_hits()
  316 if (hits[i + j].pc == pc) { in do_profile_hits()
  317 hits[i + j].hits += nr_hits; in do_profile_hits()
  319 } else if (!hits[i + j].hits) { in do_profile_hits()
  320 hits[i + j].pc = pc; in do_profile_hits()
  321 hits[i + j].hits = nr_hits; in do_profile_hits()
|
D | kexec_file.c |
  690 int ret = 0, i, j, zero_buf_sz, sha_region_sz; in kexec_calculate_store_digests() local
  735 for (j = i = 0; i < image->nr_segments; i++) { in kexec_calculate_store_digests()
  770 sha_regions[j].start = ksegment->mem; in kexec_calculate_store_digests()
  771 sha_regions[j].len = ksegment->memsz; in kexec_calculate_store_digests()
  772 j++; in kexec_calculate_store_digests()
 1166 int i, j; in crash_exclude_mem_range() local
 1191 for (j = i; j < mem->nr_ranges - 1; j++) { in crash_exclude_mem_range()
 1192 mem->ranges[j].start = in crash_exclude_mem_range()
 1193 mem->ranges[j+1].start; in crash_exclude_mem_range()
 1194 mem->ranges[j].end = in crash_exclude_mem_range()
  [all …]
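
The crash_exclude_mem_range() lines show the remove-by-shift idiom: once range i collapses, every later entry slides one slot toward the front. A standalone toy version, with hypothetical names:

#include <stdio.h>

struct mem_range { unsigned long start, end; };

static void remove_range(struct mem_range *ranges, int *nr_ranges, int i)
{
    int j;

    for (j = i; j < *nr_ranges - 1; j++) {   /* slide the tail down */
        ranges[j].start = ranges[j + 1].start;
        ranges[j].end = ranges[j + 1].end;
    }
    (*nr_ranges)--;
}

int main(void)
{
    struct mem_range r[3] = { { 0, 9 }, { 10, 19 }, { 20, 29 } };
    int n = 3;

    remove_range(r, &n, 1);                  /* drop the middle range */
    for (int i = 0; i < n; i++)
        printf("[%lu, %lu]\n", r[i].start, r[i].end);
    return 0;
}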
|
D | smp.c |
  250 unsigned int i, j, k; in cfd_seq_data_add() local
  258 for (j = 0; j < *n_data; j++) { in cfd_seq_data_add()
  259 if (new[i].u.cnt == data[j].u.cnt) { in cfd_seq_data_add()
  262 data[j].val = new[i].val; in cfd_seq_data_add()
  265 if (new[i].u.cnt < data[j].u.cnt) { in cfd_seq_data_add()
  266 for (k = *n_data; k > j; k--) in cfd_seq_data_add()
  268 data[j].val = new[i].val; in cfd_seq_data_add()
  273 if (j == *n_data) { in cfd_seq_data_add()
  274 data[j].val = new[i].val; in cfd_seq_data_add()
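
cfd_seq_data_add() keeps data[] sorted by scanning for the insertion point and shifting the tail up one slot. A hedged sketch of the same shape (the original updates an equal key in place; this toy simply skips duplicates):

#include <stdio.h>

static void sorted_insert(unsigned int *data, unsigned int *n_data, unsigned int val)
{
    unsigned int j, k;

    for (j = 0; j < *n_data; j++) {
        if (val == data[j])
            return;                          /* key already present */
        if (val < data[j]) {
            for (k = *n_data; k > j; k--)    /* shift tail up one slot */
                data[k] = data[k - 1];
            data[j] = val;
            (*n_data)++;
            return;
        }
    }
    data[(*n_data)++] = val;                 /* largest so far: append */
}

int main(void)
{
    unsigned int data[8] = { 0 }, n = 0;

    sorted_insert(data, &n, 30);
    sorted_insert(data, &n, 10);
    sorted_insert(data, &n, 20);
    for (unsigned int i = 0; i < n; i++)
        printf("%u ", data[i]);              /* prints 10 20 30 */
    printf("\n");
    return 0;
}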
|
D | audit_tree.c |
  296 int i, j; in replace_chunk() local
  302 for (i = j = 0; j < old->count; i++, j++) { in replace_chunk()
  303 if (!old->owners[j].owner) { in replace_chunk()
  307 owner = old->owners[j].owner; in replace_chunk()
  309 new->owners[i].index = old->owners[j].index - j + i; in replace_chunk()
  313 list_replace_init(&old->owners[j].list, &new->owners[i].list); in replace_chunk()
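
replace_chunk() advances j over every old slot but advances i only past live owners, so empty entries compact away (the body of the skip branch is elided in the excerpt). The same dual-index compaction in isolation:

#include <stdio.h>

/* j scans every source slot; i only advances when a live entry is
 * copied, so holes compact out of the destination */
static int compact(const int *old, int count, int *new_arr)
{
    int i, j;

    for (i = j = 0; j < count; j++) {
        if (!old[j])
            continue;                /* empty slot: i stays put */
        new_arr[i++] = old[j];
    }
    return i;                        /* live entries copied */
}

int main(void)
{
    int old[5] = { 7, 0, 3, 0, 9 }, out[5];
    int n = compact(old, 5, out);

    for (int i = 0; i < n; i++)
        printf("%d ", out[i]);       /* prints 7 3 9 */
    printf("\n");
    return 0;
}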
|
D | kexec_core.c |
  189 unsigned long j; in sanity_check_segment_list() local
  193 for (j = 0; j < i; j++) { in sanity_check_segment_list()
  196 pstart = image->segment[j].mem; in sanity_check_segment_list()
  197 pend = pstart + image->segment[j].memsz; in sanity_check_segment_list()
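
sanity_check_segment_list() checks segment i only against the j < i segments already accepted, a quadratic pairwise overlap test on half-open [mem, mem + memsz) intervals. A self-contained sketch:

#include <stdio.h>

struct segment { unsigned long mem, memsz; };

/* half-open intervals overlap iff each starts before the other ends */
static int any_overlap(const struct segment *seg, unsigned long n)
{
    for (unsigned long i = 0; i < n; i++) {
        unsigned long mstart = seg[i].mem;
        unsigned long mend = mstart + seg[i].memsz;

        for (unsigned long j = 0; j < i; j++) {
            unsigned long pstart = seg[j].mem;
            unsigned long pend = pstart + seg[j].memsz;

            if (mstart < pend && pstart < mend)
                return 1;
        }
    }
    return 0;
}

int main(void)
{
    struct segment s[2] = { { 0x1000, 0x1000 }, { 0x2000, 0x1000 } };

    printf("%d\n", any_overlap(s, 2));   /* 0: they only touch */
    return 0;
}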
|
/kernel/sched/ |
D | topology.c |
 1654 int i,j; in sched_numa_warn() local
 1665 for (j = 0; j < nr_node_ids; j++) in sched_numa_warn()
 1666 printk(KERN_CONT "%02d ", node_distance(i,j)); in sched_numa_warn()
 1747 int i, j; in sched_init_numa() local
 1759 for (j = 0; j < nr_node_ids; j++) { in sched_init_numa()
 1760 int distance = node_distance(i, j); in sched_init_numa()
 1782 for (i = 0, j = 0; i < nr_levels; i++, j++) { in sched_init_numa()
 1783 j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j); in sched_init_numa()
 1784 sched_domains_numa_distance[i] = j; in sched_init_numa()
 1821 for (j = 0; j < nr_node_ids; j++) { in sched_init_numa()
  [all …]
|
/kernel/rcu/ |
D | tree_stall.h |
   64 unsigned long j = jiffies; in rcu_gp_might_be_stalled() local
   75 return !time_before(j, READ_ONCE(rcu_state.gp_start) + d); in rcu_gp_might_be_stalled()
  144 unsigned long j = jiffies; in record_gp_stall_check_time() local
  147 WRITE_ONCE(rcu_state.gp_start, j); in record_gp_stall_check_time()
  151 WRITE_ONCE(rcu_state.jiffies_stall, j + j1); in record_gp_stall_check_time()
  152 rcu_state.jiffies_resched = j + j1 / 2; in record_gp_stall_check_time()
  170 unsigned long j; in rcu_stall_kick_kthreads() local
  174 j = READ_ONCE(rcu_state.jiffies_kick_kthreads); in rcu_stall_kick_kthreads()
  175 if (time_after(jiffies, j) && rcu_state.gp_kthread && in rcu_stall_kick_kthreads()
  181 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ); in rcu_stall_kick_kthreads()
  [all …]
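
The time_before()/time_after() calls here are the wrap-safe jiffies comparisons; they reduce to a signed subtraction. A minimal re-derivation on an explicitly 32-bit counter:

#include <stdio.h>

/* same shape as the kernel's time_after()/time_before(): the signed
 * difference stays correct across counter wrap-around */
#define my_time_after(a, b)   ((int)((b) - (a)) < 0)
#define my_time_before(a, b)  my_time_after(b, a)

int main(void)
{
    unsigned int j = 0xfffffff0u;        /* just before the counter wraps */
    unsigned int deadline = j + 0x20;    /* wraps to 0x10 */

    printf("%d\n", my_time_before(j, deadline));       /* 1: still early */
    printf("%d\n", my_time_after(j + 0x40, deadline)); /* 1: now late */
    return 0;
}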
|
D | rcu_segcblist.c |
  484 int i, j; in rcu_segcblist_advance() local
  506 for (j = RCU_WAIT_TAIL; j < i; j++) in rcu_segcblist_advance()
  507 WRITE_ONCE(rsclp->tails[j], rsclp->tails[RCU_DONE_TAIL]); in rcu_segcblist_advance()
  515 for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { in rcu_segcblist_advance()
  516 if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL]) in rcu_segcblist_advance()
  518 WRITE_ONCE(rsclp->tails[j], rsclp->tails[i]); in rcu_segcblist_advance()
  519 rcu_segcblist_move_seglen(rsclp, i, j); in rcu_segcblist_advance()
  520 rsclp->gp_seq[j] = rsclp->gp_seq[i]; in rcu_segcblist_advance()
  541 int i, j; in rcu_segcblist_accelerate() local
  585 for (j = i + 1; j <= RCU_NEXT_TAIL; j++) in rcu_segcblist_accelerate()
  [all …]
|
D | update.c |
  485 int j; in __wait_rcu_gp() local
  494 for (j = 0; j < i; j++) in __wait_rcu_gp()
  495 if (crcu_array[j] == crcu_array[i]) in __wait_rcu_gp()
  497 if (j == i) { in __wait_rcu_gp()
  509 for (j = 0; j < i; j++) in __wait_rcu_gp()
  510 if (crcu_array[j] == crcu_array[i]) in __wait_rcu_gp()
  512 if (j == i) { in __wait_rcu_gp()
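
__wait_rcu_gp() uses the j == i test twice: if the scan over earlier entries falls off the end rather than breaking, entry i has no duplicate before it. The idiom in a standalone first-occurrence counter:

#include <stdio.h>

int main(void)
{
    int arr[] = { 4, 2, 4, 7, 2 };
    int n = sizeof(arr) / sizeof(arr[0]);
    int i, j, uniq = 0;

    for (i = 0; i < n; i++) {
        for (j = 0; j < i; j++)
            if (arr[j] == arr[i])
                break;               /* seen before */
        if (j == i)                  /* inner loop ran to completion */
            uniq++;
    }
    printf("%d distinct values\n", uniq);    /* prints 3 */
    return 0;
}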
|
D | tree_nocb.h |
  336 unsigned long j, bool lazy) in rcu_nocb_do_flush_bypass() argument
  366 WRITE_ONCE(rdp->nocb_bypass_first, j); in rcu_nocb_do_flush_bypass()
  380 unsigned long j, bool lazy) in rcu_nocb_flush_bypass() argument
  386 return rcu_nocb_do_flush_bypass(rdp, rhp, j, lazy); in rcu_nocb_flush_bypass()
  393 static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j) in rcu_nocb_try_flush_bypass() argument
  399 WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j, false)); in rcu_nocb_try_flush_bypass()
  426 unsigned long j = jiffies; in rcu_nocb_try_bypass() local
  457 if (j == rdp->nocb_nobypass_last) { in rcu_nocb_try_bypass()
  460 WRITE_ONCE(rdp->nocb_nobypass_last, j); in rcu_nocb_try_bypass()
  481 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j, false)); in rcu_nocb_try_bypass()
  [all …]
|
D | tree.c |
  499 unsigned long j; in adjust_jiffies_till_sched_qs() local
  507 j = READ_ONCE(jiffies_till_first_fqs) + in adjust_jiffies_till_sched_qs()
  509 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV) in adjust_jiffies_till_sched_qs()
  510 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; in adjust_jiffies_till_sched_qs()
  511 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); in adjust_jiffies_till_sched_qs()
  512 WRITE_ONCE(jiffies_to_sched_qs, j); in adjust_jiffies_till_sched_qs()
  517 ulong j; in param_set_first_fqs_jiffies() local
  518 int ret = kstrtoul(val, 0, &j); in param_set_first_fqs_jiffies()
  521 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); in param_set_first_fqs_jiffies()
  529 ulong j; in param_set_next_fqs_jiffies() local
  [all …]
|
D | rcuscale.c |
  700 int j; in rcu_scale_cleanup() local
  741 j = writer_n_durations[i]; in rcu_scale_cleanup()
  743 scale_type, SCALE_FLAG, i, j); in rcu_scale_cleanup()
  744 ngps += j; in rcu_scale_cleanup()
  762 for (j = 0; j < writer_n_durations[i]; j++) { in rcu_scale_cleanup()
  763 wdp = &wdpp[j]; in rcu_scale_cleanup()
  766 i, j, *wdp); in rcu_scale_cleanup()
  767 if (j % 100 == 0) in rcu_scale_cleanup()
|
D | rcu.h |
  470 void rcu_lazy_set_jiffies_till_flush(unsigned long j);
  473 static inline void rcu_lazy_set_jiffies_till_flush(unsigned long j) { } in rcu_lazy_set_jiffies_till_flush() argument
  539 static inline void rcu_fwd_progress_check(unsigned long j) { } in rcu_fwd_progress_check() argument
  548 void rcu_fwd_progress_check(unsigned long j);
|
D | rcutorture.c |
  930 unsigned long j; in rcu_torture_boost_failed() local
  942 j = jiffies; in rcu_torture_boost_failed()
  944 if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp) in rcu_torture_boost_failed()
 1550 int j; in rcutorture_loop_extend() local
 1559 for (j = 0; j < i; j++) { in rcutorture_loop_extend()
 1561 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); in rcutorture_loop_extend()
 1563 return &rtrsp[j]; in rcutorture_loop_extend()
 2155 int j; in rcu_torture_fwd_cb_hist() local
 2163 for (j = 0; j <= i; j++) { in rcu_torture_fwd_cb_hist()
 2164 gps = rfp->n_launders_hist[j].launder_gp_seq; in rcu_torture_fwd_cb_hist()
  [all …]
|
/kernel/time/ |
D | timer.c |
  280 static unsigned long round_jiffies_common(unsigned long j, int cpu, in round_jiffies_common() argument
  284 unsigned long original = j; in round_jiffies_common()
  294 j += cpu * 3; in round_jiffies_common()
  296 rem = j % HZ; in round_jiffies_common()
  306 j = j - rem; in round_jiffies_common()
  308 j = j - rem + HZ; in round_jiffies_common()
  311 j -= cpu * 3; in round_jiffies_common()
  317 return time_is_after_jiffies(j) ? j : original; in round_jiffies_common()
  340 unsigned long __round_jiffies(unsigned long j, int cpu) in __round_jiffies() argument
  342 return round_jiffies_common(j, cpu, false); in __round_jiffies()
  [all …]
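
round_jiffies_common() skews each CPU by three ticks so wakeups don't bunch, rounds the remainder against HZ (down only when close to the boundary, unless forced up), then removes the skew. Re-derived in userspace with HZ assumed to be 100; the original's fallback to the unrounded value when rounding lands in the past is omitted:

#include <stdio.h>

#define HZ 100                      /* assumed tick rate for the example */

static unsigned long round_to_second(unsigned long j, int cpu, int force_up)
{
    unsigned long rem;

    j += cpu * 3;                   /* de-synchronize the CPUs */
    rem = j % HZ;
    if (rem < HZ / 4 && !force_up)
        j -= rem;                   /* close below: round down */
    else
        j = j - rem + HZ;           /* otherwise round up */
    j -= cpu * 3;                   /* undo the skew */
    return j;
}

int main(void)
{
    printf("%lu\n", round_to_second(1212, 0, 0));   /* prints 1200 */
    printf("%lu\n", round_to_second(1234, 0, 0));   /* prints 1300 */
    return 0;
}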
|
D | time.c |
  374 unsigned int jiffies_to_msecs(const unsigned long j) in jiffies_to_msecs() argument
  377 return (MSEC_PER_SEC / HZ) * j; in jiffies_to_msecs()
  379 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); in jiffies_to_msecs()
  382 return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >> in jiffies_to_msecs()
  385 return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN); in jiffies_to_msecs()
  391 unsigned int jiffies_to_usecs(const unsigned long j) in jiffies_to_usecs() argument
  400 return (USEC_PER_SEC / HZ) * j; in jiffies_to_usecs()
  403 return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32; in jiffies_to_usecs()
  405 return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN; in jiffies_to_usecs()
  691 u64 jiffies64_to_nsecs(u64 j) in jiffies64_to_nsecs() argument
  [all …]
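
jiffies_to_msecs() picks its branch from how HZ relates to MSEC_PER_SEC: a plain multiply when each tick is a whole number of milliseconds, and a round-up divide when HZ exceeds 1000 so a partial tick never reports zero. Both branches in isolation, with HZ values assumed for the example:

#include <stdio.h>

#define MSEC_PER_SEC 1000UL

/* HZ <= 1000 dividing evenly: each tick is a whole number of ms */
static unsigned int j_to_ms_simple(unsigned long j, unsigned long hz)
{
    return (MSEC_PER_SEC / hz) * j;
}

/* HZ > 1000: round up, so less than a millisecond's worth of ticks
 * still reports a full millisecond rather than 0 */
static unsigned int j_to_ms_roundup(unsigned long j, unsigned long hz)
{
    return (j + (hz / MSEC_PER_SEC) - 1) / (hz / MSEC_PER_SEC);
}

int main(void)
{
    printf("%u\n", j_to_ms_simple(25, 250));     /* prints 100 (ms) */
    printf("%u\n", j_to_ms_roundup(21, 20000));  /* prints 2 (ms) */
    return 0;
}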
|
/kernel/irq/ |
D | proc.c |
  465 int i = *(loff_t *) v, j; in show_interrupts() local
  477 for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) in show_interrupts()
  478 j *= 10; in show_interrupts()
  481 for_each_online_cpu(j) in show_interrupts()
  482 seq_printf(p, "CPU%-8d", j); in show_interrupts()
  492 for_each_online_cpu(j) in show_interrupts()
  493 any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j)); in show_interrupts()
  500 for_each_online_cpu(j) in show_interrupts()
  502 *per_cpu_ptr(desc->kstat_irqs, j) : 0); in show_interrupts()
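
The prec loop in show_interrupts() sizes the counter columns: start at three digits and widen while the next power of ten still fits within nr_irqs. In isolation:

#include <stdio.h>

int main(void)
{
    int nr_irqs = 4096;              /* assumed value for the example */
    int prec, j;

    for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
        j *= 10;
    printf("prec = %d\n", prec);     /* 4: enough digits for 4096 */
    return 0;
}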
|
/kernel/cgroup/ |
D | misc.c |
  143 struct misc_cg *i, *j; in misc_cg_try_charge() local
  174 for (j = cg; j != i; j = parent_misc(j)) in misc_cg_try_charge()
  175 misc_cg_cancel_charge(type, j, amount); in misc_cg_try_charge()
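
misc_cg_try_charge() charges each ancestor in turn and, when level i refuses, re-walks the same path from the leaf, canceling every charge made below i. A toy parent chain with hypothetical names:

#include <stdio.h>

struct cg { struct cg *parent; long usage, max; };

static int try_charge(struct cg *leaf, long amount)
{
    struct cg *i, *j;

    for (i = leaf; i; i = i->parent) {
        if (i->usage + amount > i->max)
            goto err;                /* this ancestor is over its limit */
        i->usage += amount;
    }
    return 0;
err:
    for (j = leaf; j != i; j = j->parent)
        j->usage -= amount;          /* undo the partial charges */
    return -1;
}

int main(void)
{
    struct cg root = { NULL, 0, 10 }, child = { &root, 0, 100 };

    /* root's limit blocks the charge, so the child is rolled back */
    printf("%d usage=%ld\n", try_charge(&child, 50), child.usage);
    return 0;                        /* prints -1 usage=0 */
}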
|
/kernel/trace/ |
D | trace_probe_tmpl.h |
  262 int i, j; in print_probe_args() local
  275 for (j = 0; j < a->count; j++) { in print_probe_args()
  278 trace_seq_putc(s, j == a->count - 1 ? '}' : ','); in print_probe_args()
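
print_probe_args() closes the array with '}' after the final element and separates the rest with ',', the usual last-iteration separator test:

#include <stdio.h>

int main(void)
{
    int vals[] = { 1, 2, 3 };
    int count = 3, j;

    printf("{");
    for (j = 0; j < count; j++)
        printf("%d%c", vals[j], j == count - 1 ? '}' : ',');
    printf("\n");                    /* prints {1,2,3} */
    return 0;
}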
|
/kernel/bpf/ |
D | verifier.c |
  719 int j; in print_verifier_state() local
  721 for (j = 0; j < BPF_REG_SIZE; j++) { in print_verifier_state()
  722 if (state->stack[i].slot_type[j] != STACK_INVALID) in print_verifier_state()
  724 types_buf[j] = slot_type_char[ in print_verifier_state()
  725 state->stack[i].slot_type[j]]; in print_verifier_state()
 2386 int i, j; in mark_all_scalars_precise() local
 2397 for (j = 0; j < BPF_REG_FP; j++) { in mark_all_scalars_precise()
 2398 reg = &func->regs[j]; in mark_all_scalars_precise()
 2403 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
 2404 if (!is_spilled_reg(&func->stack[j])) in mark_all_scalars_precise()
  [all …]
|
D | percpu_freelist.c |
  105 unsigned int cpu, cpu_idx, i, j, n, m; in pcpu_freelist_populate() local
  113 j = n + (cpu_idx < m ? 1 : 0); in pcpu_freelist_populate()
  114 for (i = 0; i < j; i++) { in pcpu_freelist_populate()
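
pcpu_freelist_populate() splits nr elements across the per-CPU lists: n = nr / num_cpus each, with the first m = nr % num_cpus lists taking one extra, which is what j = n + (cpu_idx < m ? 1 : 0) computes. The arithmetic alone:

#include <stdio.h>

int main(void)
{
    unsigned int nr = 10, num_cpus = 4;          /* assumed example sizes */
    unsigned int n = nr / num_cpus, m = nr % num_cpus;

    for (unsigned int cpu_idx = 0; cpu_idx < num_cpus; cpu_idx++) {
        unsigned int j = n + (cpu_idx < m ? 1 : 0);
        printf("cpu %u gets %u\n", cpu_idx, j);  /* 3 3 2 2 */
    }
    return 0;
}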
|
/kernel/power/ |
D | snapshot.c |
 2103 int j; in pack_pfns() local
 2105 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { in pack_pfns()
 2106 buf[j] = memory_bm_next_pfn(bm); in pack_pfns()
 2107 if (unlikely(buf[j] == BM_END_OF_MAP)) in pack_pfns()
 2252 int j; in unpack_orig_pfns() local
 2254 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { in unpack_orig_pfns()
 2255 if (unlikely(buf[j] == BM_END_OF_MAP)) in unpack_orig_pfns()
 2258 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j])) in unpack_orig_pfns()
 2259 memory_bm_set_bit(bm, buf[j]); in unpack_orig_pfns()
|
/kernel/locking/ |
D | locktorture.c |
  902 int i, j; in lock_torture_init() local
 1077 for (i = 0, j = 0; i < cxt.nrealwriters_stress || in lock_torture_init()
 1078 j < cxt.nrealreaders_stress; i++, j++) { in lock_torture_init()
 1089 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress)) in lock_torture_init()
 1092 firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j], in lock_torture_init()
 1093 reader_tasks[j]); in lock_torture_init()
|
D | lockdep.c |
 3559 int i, j, id; in check_no_collision() local
 3568 for (j = 0; j < chain->depth - 1; j++, i++) { in check_no_collision()
 3571 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) { in check_no_collision()
 3620 int i, j; in add_chain_cache() local
 3648 j = alloc_chain_hlocks(chain->depth); in add_chain_cache()
 3649 if (j < 0) { in add_chain_cache()
 3658 chain->base = j; in add_chain_cache()
 3659 for (j = 0; j < chain->depth - 1; j++, i++) { in add_chain_cache()
 3662 chain_hlocks[chain->base + j] = lock_id; in add_chain_cache()
 3664 chain_hlocks[chain->base + j] = hlock_id(hlock); in add_chain_cache()
  [all …]
|