
Searched refs:shift (Results 1 – 20 of 20) sorted by relevance

/kernel/time/
vsyscall.c
27 vdata[CS_HRES_COARSE].shift = tk->tkr_mono.shift; in update_vdso_data()
31 vdata[CS_RAW].shift = tk->tkr_raw.shift; in update_vdso_data()
38 nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift); in update_vdso_data()
39 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { in update_vdso_data()
40 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); in update_vdso_data()
49 nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift; in update_vdso_data()
55 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { in update_vdso_data()
56 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); in update_vdso_data()
94 vdso_ts->nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; in update_vsyscall()
99 nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; in update_vsyscall()
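Note: the pattern in these vsyscall.c hits keeps nanoseconds left-shifted by tkr_mono.shift so fractional nanoseconds survive the mult/shift conversion; one "second" in that domain is NSEC_PER_SEC << shift. A minimal standalone sketch of the normalization loop, with uint64_t standing in for the kernel's u64:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Carry whole seconds out of a shifted-nanosecond value, as the
     * while loops in update_vdso_data() do. */
    static uint64_t normalize_shifted_nsec(uint64_t nsec, uint32_t shift,
                                           uint64_t *sec)
    {
        while (nsec >= (NSEC_PER_SEC << shift)) {
            nsec -= NSEC_PER_SEC << shift;
            (*sec)++;
        }
        return nsec;
    }

    int main(void)
    {
        uint64_t sec = 0;
        /* 2.5 seconds of nanoseconds, carried pre-shifted by 8 bits. */
        uint64_t nsec = (2 * NSEC_PER_SEC + 500000000ULL) << 8;

        nsec = normalize_shifted_nsec(nsec, 8, &sec);
        printf("sec=%llu nsec=%llu\n", (unsigned long long)sec,
               (unsigned long long)(nsec >> 8)); /* sec=2 nsec=500000000 */
        return 0;
    }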
timekeeping.c
102 .shift = 0, \
119 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) { in tk_normalize_xtime()
120 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift; in tk_normalize_xtime()
123 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) { in tk_normalize_xtime()
124 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift; in tk_normalize_xtime()
134 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); in tk_xtime()
141 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_set_xtime()
147 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_xtime_add()
327 tmp <<= clock->shift; in tk_setup_internals()
344 int shift_change = clock->shift - old_clock->shift; in tk_setup_internals()
[all …]
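Note: the tk_setup_internals() hits at lines 327 and 344 deal with switching clocksources: xtime_nsec is stored shifted by the old clock's shift, so it must be re-scaled when the shift changes. A hedged sketch of that re-scaling (rescale_xtime_nsec is a made-up name for illustration):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t rescale_xtime_nsec(uint64_t xtime_nsec,
                                       uint32_t old_shift, uint32_t new_shift)
    {
        int shift_change = (int)new_shift - (int)old_shift;

        if (shift_change < 0)
            xtime_nsec >>= -shift_change;  /* drops fractional bits */
        else
            xtime_nsec <<= shift_change;
        return xtime_nsec;
    }

    int main(void)
    {
        /* 500 ns carried at shift 10, moving to a clock with shift 8. */
        uint64_t ns = 500ULL << 10;

        printf("%llu\n",
               (unsigned long long)(rescale_xtime_nsec(ns, 10, 8) >> 8)); /* 500 */
        return 0;
    }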
sched_clock.c
68 static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) in cyc_to_ns() argument
70 return (cyc * mult) >> shift; in cyc_to_ns()
95 res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift); in sched_clock()
138 ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift); in update_sched_clock()
183 ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift); in sched_clock_register()
189 rd.shift = new_shift; in sched_clock_register()
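Note: cyc_to_ns() above is the core mult/shift fixed-point conversion, ns = (cyc * mult) >> shift, with mult and shift chosen so the multiply approximates cyc * NSEC_PER_SEC / rate. A standalone copy with a hypothetical 1 MHz counter (1 cycle = 1000 ns, encoded exactly by mult = 1000 << 10, shift = 10):

    #include <stdint.h>
    #include <stdio.h>

    static inline uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
    {
        return (cyc * mult) >> shift;
    }

    int main(void)
    {
        uint32_t shift = 10;
        uint32_t mult = 1000u << shift;  /* 1000 ns per cycle, exactly */

        printf("%llu\n", (unsigned long long)cyc_to_ns(123, mult, shift)); /* 123000 */
        return 0;
    }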
clocksource.c
47 clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec) in clocks_calc_mult_shift() argument
74 *shift = sft; in clocks_calc_mult_shift()
238 watchdog->shift); in cs_watchdog_read()
257 wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift); in cs_watchdog_read()
369 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); in clocksource_verify_percpu()
460 watchdog->shift); in clocksource_watchdog()
463 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); in clocksource_watchdog()
845 suspend_clocksource->shift); in clocksource_stop_suspend_timing()
927 u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc) in clocks_calc_max_nsecs() argument
945 max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift); in clocks_calc_max_nsecs()
[all …]
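Note: clocks_calc_mult_shift() at line 47 derives the mult/shift pair itself: it picks the largest shift (most precision) whose rounded mult keeps cycles * mult inside 64 bits for maxsec seconds of input. A simplified userspace re-derivation, with plain division replacing do_div():

    #include <stdint.h>
    #include <stdio.h>

    static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
                                uint32_t from, uint32_t to, uint32_t maxsec)
    {
        uint64_t tmp;
        uint32_t sft, sftacc = 32;

        /* Bits that must stay free so maxsec seconds of cycles
         * cannot overflow the 64-bit multiply. */
        tmp = ((uint64_t)maxsec * from) >> 32;
        while (tmp) {
            tmp >>= 1;
            sftacc--;
        }

        /* Largest shift whose rounded mult still fits. */
        for (sft = 32; sft > 0; sft--) {
            tmp = (uint64_t)to << sft;
            tmp += from / 2;  /* round to nearest */
            tmp /= from;
            if ((tmp >> sftacc) == 0)
                break;
        }
        *mult = (uint32_t)tmp;
        *shift = sft;
    }

    int main(void)
    {
        uint32_t mult, shift;

        /* 24 MHz counter to nanoseconds, valid for at least 600 s. */
        calc_mult_shift(&mult, &shift, 24000000, 1000000000, 600);
        printf("mult=%u shift=%u -> 24 cycles = %llu ns\n", mult, shift,
               (unsigned long long)(((uint64_t)24 * mult) >> shift)); /* 1000 */
        return 0;
    }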
clockevents.c
35 u64 clc = (u64) latch << evt->shift; in cev_delta2ns()
46 if ((clc >> evt->shift) != (u64)latch) in cev_delta2ns()
69 (!ismax || evt->mult <= (1ULL << evt->shift))) in cev_delta2ns()
247 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; in clockevents_program_min_delta()
286 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; in clockevents_program_min_delta()
333 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; in clockevents_program_event()
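Note: clockevents stores mult/shift for the opposite direction, cycles = (ns * mult) >> shift, so cev_delta2ns() at line 35 inverts it as ns = (latch << shift) / mult; the comparison at line 46 detects latch << shift overflowing 64 bits. A sketch with a hypothetical roughly 1 MHz event device (mult about 2^32 / 1000, shift = 32):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t delta2ns(uint64_t latch, uint32_t mult, uint32_t shift)
    {
        uint64_t clc = latch << shift;

        /* Shifting back must reproduce latch, else clamp, mirroring
         * the overflow check in cev_delta2ns(). */
        if ((clc >> shift) != latch)
            clc = ~0ULL;
        return clc / mult;
    }

    int main(void)
    {
        /* 1000 device cycles at ~1 MHz is about 1 ms. */
        printf("%llu ns\n",
               (unsigned long long)delta2ns(1000, 4294967u, 32)); /* 1000000 */
        return 0;
    }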
timecounter.c
15 tc->mask = (1ULL << cc->shift) - 1; in timecounter_init()
74 ns = ((ns * cc->mult) - frac) >> cc->shift; in cc_cyc2ns_backwards()
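Note: timecounter_init() at line 15 builds mask = (1 << shift) - 1 so conversions can keep the low shift bits as a fractional-nanosecond carry instead of dropping them on every call. A sketch of that accumulation, modeled on the kernel's forward helper cyclecounter_cyc2ns():

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cyc2ns_with_frac(uint64_t cycles, uint32_t mult,
                                     uint32_t shift, uint64_t *frac)
    {
        uint64_t mask = (1ULL << shift) - 1;
        uint64_t ns = cycles * mult + *frac;

        *frac = ns & mask;  /* carry the sub-ns remainder forward */
        return ns >> shift;
    }

    int main(void)
    {
        uint64_t frac = 0, total = 0;

        /* One cycle is 10.5 ns (mult = 21, shift = 1); convert
         * 3 cycles twice and let the remainder carry. */
        for (int i = 0; i < 2; i++)
            total += cyc2ns_with_frac(3, 21, 1, &frac);
        printf("%llu ns\n", (unsigned long long)total); /* 31 + 32 = 63 */
        return 0;
    }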
time.c
308 txc->shift = tx32.shift; in get_old_timex32()
337 tx32.shift = txc->shift; in put_old_timex32()
timeconst.bc
29 /* Compute the appropriate mul/adj values as well as a shift count,
31 a shift value will be correct in the signed integer range and off
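Note: timeconst.bc precomputes multiply/add/shift constants so the HZ conversion helpers avoid runtime division. The underlying trick is constant division by multiply-and-shift; the exact divide-by-10 pair below (mul = 0xCCCCCCCD, shift = 35, valid for all 32-bit inputs) is a classic illustration of the idea, not the kernel's generated table:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t div10(uint32_t x)
    {
        /* (x * ceil(2^35 / 10)) >> 35 == x / 10 for every uint32_t x. */
        return (uint32_t)(((uint64_t)x * 0xCCCCCCCDULL) >> 35);
    }

    int main(void)
    {
        /* With HZ = 100, msecs -> jiffies is exactly msec / 10. */
        printf("%u %u %u\n", div10(0), div10(999), div10(4294967295u));
        /* prints: 0 99 429496729 */
        return 0;
    }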
tick-broadcast-hrtimer.c
95 .shift = 0,
jiffies.c
39 .shift = JIFFIES_SHIFT,
clocksource-wdtest.c
47 .shift = JIFFIES_SHIFT,
ntp.c
200 txc->shift = pps_shift; in pps_fill_timex()
230 txc->shift = 0; in pps_fill_timex()
timer_list.c
201 SEQ_printf(m, " shift: %u\n", dev->shift); in print_tickdevice()
/kernel/bpf/
tnum.c
37 struct tnum tnum_lshift(struct tnum a, u8 shift) in tnum_lshift() argument
39 return TNUM(a.value << shift, a.mask << shift); in tnum_lshift()
42 struct tnum tnum_rshift(struct tnum a, u8 shift) in tnum_rshift() argument
44 return TNUM(a.value >> shift, a.mask >> shift); in tnum_rshift()
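Note: a tnum tracks a partially known value as a (value, mask) pair, where mask bits are unknown; shifting both fields by the same amount moves known and unknown bits in lockstep, which is why tnum_lshift() and tnum_rshift() are one-liners. A standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value, mask; };

    static struct tnum tnum_lshift(struct tnum a, uint8_t shift)
    {
        return (struct tnum){ a.value << shift, a.mask << shift };
    }

    int main(void)
    {
        /* Binary "10?1": value 0b1001, bit 1 unknown (mask 0b0010). */
        struct tnum a = { 0x9, 0x2 };
        struct tnum b = tnum_lshift(a, 4);

        /* Shifted left by 4 it reads "10?10000". */
        printf("value=%#llx mask=%#llx\n",
               (unsigned long long)b.value,
               (unsigned long long)b.mask); /* value=0x90 mask=0x20 */
        return 0;
    }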
verifier.c
13867 u8 shift = bpf_ctx_narrow_access_offset( in convert_ctx_accesses() local
13869 if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) { in convert_ctx_accesses()
13874 if (shift) in convert_ctx_accesses()
13877 shift); in convert_ctx_accesses()
13881 if (shift) in convert_ctx_accesses()
13884 shift); in convert_ctx_accesses()
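Note: the verifier rewrite around line 13867 turns a narrow (for example one-byte) read of a wider context field into a full-width load followed by a right shift of byte offset * 8 and a truncation; bpf_ctx_narrow_access_offset() supplies the offset with the target's endianness in mind. A userspace sketch of the extraction arithmetic only, ignoring endianness (narrow_read_u8 is a made-up name):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t narrow_read_u8(uint32_t full, unsigned int byte_off)
    {
        uint8_t shift = byte_off * 8;  /* byte offset -> bit shift */

        return (uint8_t)(full >> shift);
    }

    int main(void)
    {
        uint32_t field = 0xAABBCCDD;

        printf("%#x\n", narrow_read_u8(field, 1)); /* 0xcc */
        return 0;
    }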
/kernel/sched/
loadavg.c
71 void get_avenrun(unsigned long *loads, unsigned long offset, int shift) in get_avenrun() argument
73 loads[0] = (avenrun[0] + offset) << shift; in get_avenrun()
74 loads[1] = (avenrun[1] + offset) << shift; in get_avenrun()
75 loads[2] = (avenrun[2] + offset) << shift; in get_avenrun()
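Note: avenrun[] holds the load averages in fixed point with FSHIFT fractional bits, and get_avenrun() lets callers add a rounding offset and rescale via shift. A sketch of how /proc/loadavg-style output decodes such a value, using the kernel's FSHIFT = 11 and its LOAD_INT/LOAD_FRAC macros:

    #include <stdio.h>

    #define FSHIFT   11
    #define FIXED_1  (1 << FSHIFT)
    #define LOAD_INT(x)  ((x) >> FSHIFT)
    #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

    int main(void)
    {
        /* A load of 1.50 in fixed point, with the customary FIXED_1/200
         * rounding offset that get_avenrun() callers pass in. */
        unsigned long avn = (3 * FIXED_1) / 2 + FIXED_1 / 200;

        printf("%lu.%02lu\n", LOAD_INT(avn), LOAD_FRAC(avn)); /* 1.50 */
        return 0;
    }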
wait_bit.c
14 const int shift = BITS_PER_LONG == 32 ? 5 : 6; in bit_waitqueue() local
15 unsigned long val = (unsigned long)word << shift | bit; in bit_waitqueue()
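Note: bit_waitqueue() packs (word address, bit index) into one hash key; shifting the address left by log2(BITS_PER_LONG) (5 on 32-bit, 6 on 64-bit) frees exactly enough low bits to hold any bit index, so distinct (word, bit) pairs yield distinct keys. A sketch of the packing (bit_key is a made-up name):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned long bit_key(const unsigned long *word, unsigned int bit)
    {
        const int shift = sizeof(unsigned long) == 4 ? 5 : 6;

        return (unsigned long)(uintptr_t)word << shift | bit;
    }

    int main(void)
    {
        unsigned long flags = 0;

        printf("%#lx %#lx\n", bit_key(&flags, 0), bit_key(&flags, 3));
        return 0;
    }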
sched.h
235 #define shr_bound(val, shift) \ argument
236 (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
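Note: shifting by an amount greater than or equal to the type's width is undefined behaviour in C; shr_bound() clamps the shift count to BITS_PER_TYPE - 1 so the expression stays defined. A standalone version (GNU C typeof, as the kernel itself uses):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_TYPE(t) ((unsigned)(sizeof(t) * 8))
    #define MIN(a, b)        ((a) < (b) ? (a) : (b))
    #define shr_bound(val, shift) \
        ((val) >> MIN((unsigned)(shift), BITS_PER_TYPE(typeof(val)) - 1))

    int main(void)
    {
        uint64_t v = 1ULL << 40;

        /* A naive v >> 64 is UB; the clamped form gives v >> 63 == 0. */
        printf("%llu %llu\n",
               (unsigned long long)shr_bound(v, 10),   /* 1073741824 */
               (unsigned long long)shr_bound(v, 64));  /* 0 */
        return 0;
    }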
fair.c
319 int shift = WMULT_SHIFT; in __calc_delta() local
326 shift -= fs; in __calc_delta()
335 shift -= fs; in __calc_delta()
339 return mul_u64_u32_shr(delta_exec, fact, shift); in __calc_delta()
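Note: __calc_delta() computes (delta_exec * fact) >> shift without 128-bit math: when fact outgrows 32 bits, its high bits are shifted out (the kernel uses fls() on the high word; a loop is used below for clarity) and shift is reduced by the same amount, preserving the ratio, before a 64x32 multiply-shift. WMULT_SHIFT is 32 in fair.c. A hedged sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* 64x32 multiply-shift done in halves, like the generic kernel
     * helper; assumes shift <= 32 after reduction. */
    static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
    {
        uint64_t lo = (a & 0xffffffffULL) * mul;
        uint64_t hi = (a >> 32) * mul;

        return (lo >> shift) + (hi << (32 - shift));
    }

    static uint64_t calc_scaled(uint64_t delta, uint64_t fact, int shift)
    {
        /* Reduce fact to 32 bits, compensating in the shift. */
        while (fact >> 32) {
            fact >>= 1;
            shift--;
        }
        return mul_u64_u32_shr(delta, (uint32_t)fact, shift);
    }

    int main(void)
    {
        /* A factor of 3 carried at 2^32 scale (shift starts at 32). */
        uint64_t fact = 3ULL << 32;

        printf("%llu\n",
               (unsigned long long)calc_scaled(1000000, fact, 32)); /* 3000000 */
        return 0;
    }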
/kernel/rcu/
tasks.h
230 int shift; in cblist_init_generic() local
243 shift = ilog2(nr_cpu_ids / lim); in cblist_init_generic()
244 if (((nr_cpu_ids - 1) >> shift) >= lim) in cblist_init_generic()
245 shift++; in cblist_init_generic()
246 WRITE_ONCE(rtp->percpu_enqueue_shift, shift); in cblist_init_generic()
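Note: cblist_init_generic() maps nr_cpu_ids CPUs onto at most lim callback lists by right-shifting the CPU number; ilog2() floors, so the extra check bumps the shift whenever the highest CPU id would still land at an index >= lim. A sketch of that round-up with a simple ilog2 (enqueue_shift is a made-up name):

    #include <stdio.h>

    static int ilog2_u32(unsigned int v)  /* floor(log2(v)), v > 0 */
    {
        return 31 - __builtin_clz(v);
    }

    static int enqueue_shift(unsigned int nr_cpu_ids, unsigned int lim)
    {
        int shift = ilog2_u32(nr_cpu_ids / lim);

        /* If the highest CPU id still maps past the limit, shift more. */
        if (((nr_cpu_ids - 1) >> shift) >= lim)
            shift++;
        return shift;
    }

    int main(void)
    {
        /* 6 CPUs onto 4 lists: 6/4 = 1, ilog2 = 0, but 5 >> 0 = 5 >= 4,
         * so the shift rounds up to 1 (CPU pairs share a list). */
        printf("%d\n", enqueue_shift(6, 4)); /* 1 */
        return 0;
    }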