/kernel/time/ |
D | vsyscall.c |
     27  vdata[CS_HRES_COARSE].shift = tk->tkr_mono.shift;  in update_vdso_data()
     31  vdata[CS_RAW].shift = tk->tkr_raw.shift;  in update_vdso_data()
     38  nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);  in update_vdso_data()
     39  while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {  in update_vdso_data()
     40  nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);  in update_vdso_data()
     49  nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;  in update_vdso_data()
     55  while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {  in update_vdso_data()
     56  nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);  in update_vdso_data()
     94  vdso_ts->nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;  in update_vsyscall()
     99  nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;  in update_vsyscall()
|
D | timekeeping.c |
    102  .shift = 0, \
    119  while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {  in tk_normalize_xtime()
    120  tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;  in tk_normalize_xtime()
    123  while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {  in tk_normalize_xtime()
    124  tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;  in tk_normalize_xtime()
    134  ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);  in tk_xtime()
    141  tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;  in tk_set_xtime()
    147  tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;  in tk_xtime_add()
    327  tmp <<= clock->shift;  in tk_setup_internals()
    344  int shift_change = clock->shift - old_clock->shift;  in tk_setup_internals()
    [all …]
|
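The vsyscall.c and timekeeping.c hits above all use the same trick: nanoseconds are kept left-shifted by the clocksource's shift, so one second equals NSEC_PER_SEC << shift and whole seconds can be carried out of the accumulator without throwing away the sub-nanosecond precision of the mult/shift conversion. A minimal user-space sketch of that normalization step (struct and field names here are illustrative, not the kernel's):

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Nanoseconds are stored pre-scaled by 2^shift so clocksource deltas
     * (cycles * mult) can be accumulated without an early right shift
     * losing precision. */
    struct shifted_time {
            uint64_t sec;
            uint64_t nsec_shifted;   /* nanoseconds << shift */
            uint32_t shift;
    };

    /* Carry whole seconds out of the accumulator, mirroring the while loops
     * seen in tk_normalize_xtime() and update_vdso_data(). */
    static void normalize_shifted(struct shifted_time *t)
    {
            uint64_t one_sec = NSEC_PER_SEC << t->shift;

            while (t->nsec_shifted >= one_sec) {
                    t->nsec_shifted -= one_sec;
                    t->sec++;
            }
    }
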
D | sched_clock.c |
     68  static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)  in cyc_to_ns() argument
     70  return (cyc * mult) >> shift;  in cyc_to_ns()
     95  res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);  in sched_clock()
    138  ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);  in update_sched_clock()
    183  ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);  in sched_clock_register()
    189  rd.shift = new_shift;  in sched_clock_register()
|
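cyc_to_ns() in sched_clock.c is the core mult/shift idiom: nanoseconds-per-cycle is stored as the fixed-point fraction mult / 2^shift. A hedged stand-alone version with a made-up example rate:

    #include <stdint.h>

    /* ns = cyc * (mult / 2^shift); mult and shift approximate
     * NSEC_PER_SEC / counter_hz.  The caller bounds cyc (sched_clock masks
     * the delta) so cyc * mult does not overflow 64 bits. */
    static inline uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
    {
            return (cyc * mult) >> shift;
    }

    /* Illustrative values: a 500 MHz counter ticks every 2 ns, so with
     * shift = 24 and mult = 2 << 24, cyc_to_ns(1000, 2 << 24, 24) == 2000. */
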
D | clocksource.c |
     47  clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)  in clocks_calc_mult_shift() argument
     74  *shift = sft;  in clocks_calc_mult_shift()
    238  watchdog->shift);  in cs_watchdog_read()
    257  wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);  in cs_watchdog_read()
    369  cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);  in clocksource_verify_percpu()
    460  watchdog->shift);  in clocksource_watchdog()
    463  cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);  in clocksource_watchdog()
    845  suspend_clocksource->shift);  in clocksource_stop_suspend_timing()
    927  u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)  in clocks_calc_max_nsecs() argument
    945  max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);  in clocks_calc_max_nsecs()
    [all …]
|
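clocks_calc_mult_shift() derives the mult/shift pair from a source and target frequency. Below is a simplified, hedged reconstruction of the idea; it drops the maxsec-based bounding the kernel applies so long intervals cannot overflow 64 bits:

    #include <stdint.h>

    /* Choose mult/shift so that (x * mult) >> shift converts counts at
     * `from` Hz into counts at `to` Hz: take the largest shift for which the
     * rounded mult = to * 2^shift / from still fits in 32 bits. */
    static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
                                uint32_t from, uint32_t to)
    {
            uint64_t tmp;
            uint32_t sft;

            for (sft = 32; ; sft--) {
                    tmp = (((uint64_t)to << sft) + from / 2) / from;
                    if ((tmp >> 32) == 0 || sft == 0)
                            break;
            }
            *mult = (uint32_t)tmp;
            *shift = sft;
    }
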
D | clockevents.c |
     35  u64 clc = (u64) latch << evt->shift;  in cev_delta2ns()
     46  if ((clc >> evt->shift) != (u64)latch)  in cev_delta2ns()
     69  (!ismax || evt->mult <= (1ULL << evt->shift))  in cev_delta2ns()
    247  clc = ((unsigned long long) delta * dev->mult) >> dev->shift;  in clockevents_program_min_delta()
    286  clc = ((unsigned long long) delta * dev->mult) >> dev->shift;  in clockevents_program_min_delta()
    333  clc = ((unsigned long long) delta * dev->mult) >> dev->shift;  in clockevents_program_event()
|
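clockevents.c converts in both directions: the programming paths turn a nanosecond delta into device cycles with (delta * mult) >> shift, and cev_delta2ns() turns a cycle latch back into nanoseconds with (latch << shift) / mult. A hedged sketch of the round trip; the kernel additionally checks the left shift for overflow and rounds the result up so a timer never fires early:

    #include <stdint.h>

    /* ns -> device cycles, as in clockevents_program_event(); callers cap
     * delta_ns so the 64-bit product cannot overflow. */
    static uint64_t ns_to_cycles(uint64_t delta_ns, uint32_t mult, uint32_t shift)
    {
            return (delta_ns * mult) >> shift;
    }

    /* device cycles -> ns, the inverse used by cev_delta2ns(). */
    static uint64_t cycles_to_ns(uint64_t latch, uint32_t mult, uint32_t shift)
    {
            return (latch << shift) / mult;
    }
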
D | timecounter.c |
     15  tc->mask = (1ULL << cc->shift) - 1;  in timecounter_init()
     74  ns = ((ns * cc->mult) - frac) >> cc->shift;  in cc_cyc2ns_backwards()
|
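The timecounter.c hits show why the mask is (1ULL << shift) - 1: the low shift bits of each conversion are the fractional nanoseconds, and carrying them between calls stops rounding error from accumulating. A small sketch of that carry, modeled on (but not copied from) cyclecounter_cyc2ns():

    #include <stdint.h>

    /* Convert a cycle delta to ns, carrying the sub-nanosecond remainder
     * (the low `shift` bits) in *frac across calls. */
    static uint64_t cyc2ns_frac(uint64_t cycles, uint32_t mult, uint32_t shift,
                                uint64_t *frac)
    {
            uint64_t ns = cycles * mult + *frac;

            *frac = ns & ((1ULL << shift) - 1);
            return ns >> shift;
    }
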
D | time.c |
    308  txc->shift = tx32.shift;  in get_old_timex32()
    337  tx32.shift = txc->shift;  in put_old_timex32()
|
D | timeconst.bc |
     29  /* Compute the appropriate mul/adj values as well as a shift count,
     31  a shift value will be correct in the signed integer range and off
|
D | tick-broadcast-hrtimer.c | 95 .shift = 0,
|
D | jiffies.c | 39 .shift = JIFFIES_SHIFT,
|
D | clocksource-wdtest.c | 47 .shift = JIFFIES_SHIFT,
|
D | ntp.c |
    200  txc->shift = pps_shift;  in pps_fill_timex()
    230  txc->shift = 0;  in pps_fill_timex()
|
D | timer_list.c | 201 SEQ_printf(m, " shift: %u\n", dev->shift); in print_tickdevice()
|
/kernel/bpf/ |
D | tnum.c |
     37  struct tnum tnum_lshift(struct tnum a, u8 shift)  in tnum_lshift() argument
     39  return TNUM(a.value << shift, a.mask << shift);  in tnum_lshift()
     42  struct tnum tnum_rshift(struct tnum a, u8 shift)  in tnum_rshift() argument
     44  return TNUM(a.value >> shift, a.mask >> shift);  in tnum_rshift()
|
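tnum_lshift()/tnum_rshift() work because a tnum tracks a value as two bitmasks: `value` holds the bits known to be 1 and `mask` the unknown bits, so shifting the tracked quantity just shifts both components. A self-contained copy of that representation, assuming the TNUM() constructor simply pairs the two fields:

    #include <stdint.h>

    struct tnum {
            uint64_t value;   /* bits known to be 1 */
            uint64_t mask;    /* bits whose value is unknown */
    };

    static struct tnum tnum_lshift(struct tnum a, uint8_t shift)
    {
            return (struct tnum){ a.value << shift, a.mask << shift };
    }

    static struct tnum tnum_rshift(struct tnum a, uint8_t shift)
    {
            return (struct tnum){ a.value >> shift, a.mask >> shift };
    }
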
D | verifier.c |
    13867  u8 shift = bpf_ctx_narrow_access_offset(  in convert_ctx_accesses() local
    13869  if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {  in convert_ctx_accesses()
    13874  if (shift)  in convert_ctx_accesses()
    13877  shift);  in convert_ctx_accesses()
    13881  if (shift)  in convert_ctx_accesses()
    13884  shift);  in convert_ctx_accesses()
|
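The verifier.c shift belongs to narrow context loads: the rewritten program loads the full-width field, then shifts right by the byte offset within the field (the kernel gets that shift from bpf_ctx_narrow_access_offset() * 8) and masks down to the requested size. A hedged C model of the value the emitted BPF_RSH/BPF_AND instructions compute; narrow_read() and its parameters are illustrative, not kernel API:

    #include <stdint.h>

    static uint64_t narrow_read(uint64_t field, uint32_t byte_off, uint32_t size)
    {
            uint64_t val = field >> (byte_off * 8);   /* bring wanted bytes to the low end */

            if (size < 8)
                    val &= (1ULL << (size * 8)) - 1;  /* keep only the accessed bytes */
            return val;
    }
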
/kernel/sched/ |
D | loadavg.c |
     71  void get_avenrun(unsigned long *loads, unsigned long offset, int shift)  in get_avenrun() argument
     73  loads[0] = (avenrun[0] + offset) << shift;  in get_avenrun()
     74  loads[1] = (avenrun[1] + offset) << shift;  in get_avenrun()
     75  loads[2] = (avenrun[2] + offset) << shift;  in get_avenrun()
|
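get_avenrun() hands the averages out in fixed point with a caller-chosen offset and shift; avenrun[] itself carries FSHIFT (11) fractional bits, and readers such as /proc/loadavg split that back into an integer and a two-digit fraction. A small sketch of the decode step:

    #include <stdio.h>

    #define FSHIFT   11                /* fractional bits of the load average */
    #define FIXED_1  (1 << FSHIFT)     /* 1.0 in that fixed-point format */

    /* Same decomposition LOAD_INT()/LOAD_FRAC() perform for /proc/loadavg. */
    static void print_load(unsigned long avn)
    {
            unsigned long ipart = avn >> FSHIFT;
            unsigned long frac  = ((avn & (FIXED_1 - 1)) * 100) >> FSHIFT;

            printf("%lu.%02lu\n", ipart, frac);
    }
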
D | wait_bit.c |
     14  const int shift = BITS_PER_LONG == 32 ? 5 : 6;  in bit_waitqueue() local
     15  unsigned long val = (unsigned long)word << shift | bit;  in bit_waitqueue()
|
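bit_waitqueue() packs the word address and bit number into a single key: the address is shifted left far enough to leave room for any bit index within a long (5 bits on 32-bit, 6 on 64-bit) and the bit number is OR'd into the freed space before hashing. A user-space rendering of just the key construction:

    /* Combine (word, bit) into one value to hash into a wait-queue bucket. */
    static unsigned long bit_wait_key(const void *word, unsigned int bit)
    {
            const int shift = sizeof(long) == 4 ? 5 : 6;   /* log2(BITS_PER_LONG) */

            return ((unsigned long)word << shift) | bit;
    }
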
D | sched.h |
    235  #define shr_bound(val, shift) \  argument
    236  (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
|
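shr_bound() exists because a shift count greater than or equal to the operand's width is undefined behaviour in C, so the macro clamps the count to width - 1 instead of letting it wrap. A plain-function equivalent for a 64-bit operand:

    #include <stdint.h>

    /* Right shift with the count clamped to 63, never undefined. */
    static inline uint64_t shr_bound64(uint64_t val, unsigned int shift)
    {
            return val >> (shift < 63 ? shift : 63);
    }
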
D | fair.c |
    319  int shift = WMULT_SHIFT;  in __calc_delta() local
    326  shift -= fs;  in __calc_delta()
    335  shift -= fs;  in __calc_delta()
    339  return mul_u64_u32_shr(delta_exec, fact, shift);  in __calc_delta()
|
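__calc_delta() keeps its weight factor inside 32 bits by right-shifting it with fls() and reducing `shift` by the same amount, so the final mul_u64_u32_shr() retains the full 64x32-bit product. A hedged sketch of that helper using the compiler's 128-bit type (the kernel also has a portable split-halves fallback):

    #include <stdint.h>

    /* (a * mul) >> shift without dropping the high bits of the 96-bit product. */
    static inline uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
    {
            return (uint64_t)(((unsigned __int128)a * mul) >> shift);
    }
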
/kernel/rcu/ |
D | tasks.h |
    230  int shift;  in cblist_init_generic() local
    243  shift = ilog2(nr_cpu_ids / lim);  in cblist_init_generic()
    244  if (((nr_cpu_ids - 1) >> shift) >= lim)  in cblist_init_generic()
    245  shift++;  in cblist_init_generic()
    246  WRITE_ONCE(rtp->percpu_enqueue_shift, shift);  in cblist_init_generic()
|
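cblist_init_generic() picks percpu_enqueue_shift so that cpu >> shift maps every CPU into one of at most `lim` callback lists; the ilog2() estimate is bumped by one whenever the highest CPU id would still land past the limit. A stand-alone version of that computation (ilog2_u32() is a hypothetical helper standing in for the kernel's ilog2()):

    /* floor(log2(v)); stand-in for the kernel's ilog2(). */
    static unsigned int ilog2_u32(unsigned int v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    /* Smallest shift with (cpu >> shift) < lim for every cpu < nr_cpu_ids. */
    static int enqueue_shift(unsigned int nr_cpu_ids, unsigned int lim)
    {
            int shift = ilog2_u32(nr_cpu_ids / lim);

            if (((nr_cpu_ids - 1) >> shift) >= lim)
                    shift++;
            return shift;
    }
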