Lines matching +full:timebase +full:-frequency (i.e. the device-tree property "timebase-frequency") in arch/powerpc/kernel/time.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
8 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
11 * to make clock more stable (2.4.0-test5). The only thing
20 * - improve precision and reproducibility of timebase frequency
22 * - for astronomical applications: add a new function to get
26 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
49 #include <linux/posix-timers.h>
72 #include <asm/asm-prototypes.h>
81 .name = "timebase",
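For context, the .name initializer at 81 is one field of a struct clocksource definition. A minimal sketch of what such a definition looks like, assuming illustrative values for the members this listing omits (the field names are the real struct clocksource members; the rating and the reader shown here are assumptions, not necessarily what this file uses):

#include <linux/clocksource.h>
#include <asm/time.h>

static u64 timebase_read(struct clocksource *cs)
{
	return (u64)get_tb();	/* raw timebase ticks */
}

static struct clocksource clocksource_timebase = {
	.name	= "timebase",
	.rating	= 400,				/* illustrative rating */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.mask	= CLOCKSOURCE_MASK(64),		/* full 64-bit counter */
	.read	= timebase_read,
};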
145 * Factor for converting from cputime_t (timebase ticks) to
146 * microseconds. This is stored as 0.64 fixed-point binary fraction.
165 * or if that doesn't exist return the timebase value passed in.
186 u64 i = local_paca->dtl_ridx; in scan_dispatch_log()
187 struct dtl_entry *dtl = local_paca->dtl_curr; in scan_dispatch_log()
188 struct dtl_entry *dtl_end = local_paca->dispatch_log_end; in scan_dispatch_log()
189 struct lppaca *vpa = local_paca->lppaca_ptr; in scan_dispatch_log()
197 if (i == be64_to_cpu(vpa->dtl_idx)) in scan_dispatch_log()
199 while (i < be64_to_cpu(vpa->dtl_idx)) { in scan_dispatch_log()
200 dtb = be64_to_cpu(dtl->timebase); in scan_dispatch_log()
201 tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) + in scan_dispatch_log()
202 be32_to_cpu(dtl->ready_to_enqueue_time); in scan_dispatch_log()
204 if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { in scan_dispatch_log()
206 i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; in scan_dispatch_log()
207 dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); in scan_dispatch_log()
218 dtl = local_paca->dispatch_log; in scan_dispatch_log()
220 local_paca->dtl_ridx = i; in scan_dispatch_log()
221 local_paca->dtl_curr = dtl; in scan_dispatch_log()
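The fragments at 186-221 walk the dispatch trace log (DTL) as a ring shared with the hypervisor: dtl_ridx is the reader's monotonically increasing index, vpa->dtl_idx is the writer's, and the buffer slot is the index modulo N_DISPATCH_LOG. The subtle step is at 204-207: if the writer has lapped the reader by a full ring, skip ahead to the oldest entry that still exists. A standalone sketch of that catch-up rule, with generic names (RING_SIZE stands in for N_DISPATCH_LOG, whose real value is derived elsewhere):

#define RING_SIZE 128	/* placeholder for N_DISPATCH_LOG */

static unsigned long ring_catch_up(unsigned long rd_idx, unsigned long wr_idx)
{
	/* Entries older than wr_idx - RING_SIZE have been overwritten,
	 * so resume at the oldest surviving entry. */
	if (rd_idx + RING_SIZE < wr_idx)
		rd_idx = wr_idx - RING_SIZE;
	return rd_idx;		/* buffer slot is rd_idx % RING_SIZE */
}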
233 struct cpu_accounting_data *acct = &local_paca->accounting; in accumulate_stolen_time()
243 sst = scan_dispatch_log(acct->starttime_user); in accumulate_stolen_time()
244 ust = scan_dispatch_log(acct->starttime); in accumulate_stolen_time()
245 acct->stime -= sst; in accumulate_stolen_time()
246 acct->utime -= ust; in accumulate_stolen_time()
247 acct->steal_time += ust + sst; in accumulate_stolen_time()
257 if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) in calculate_stolen_time()
284 deltascaled = nowscaled - acct->startspurr; in vtime_delta_scaled()
285 acct->startspurr = nowscaled; in vtime_delta_scaled()
286 utime = acct->utime - acct->utime_sspurr; in vtime_delta_scaled()
287 acct->utime_sspurr = acct->utime; in vtime_delta_scaled()
294 * and user time (udelta) values obtained from the timebase in vtime_delta_scaled()
296 * the user ticks get saved up in paca->user_time_scaled to be in vtime_delta_scaled()
304 utime_scaled = deltascaled - stime_scaled; in vtime_delta_scaled()
309 acct->utime_scaled += utime_scaled; in vtime_delta_scaled()
325 stime = now - acct->starttime; in vtime_delta()
326 acct->starttime = now; in vtime_delta()
342 stime -= min(stime, steal_time); in vtime_account_kernel()
343 acct->steal_time += steal_time; in vtime_account_kernel()
345 if ((tsk->flags & PF_VCPU) && !irq_count()) { in vtime_account_kernel()
346 acct->gtime += stime; in vtime_account_kernel()
348 acct->utime_scaled += stime_scaled; in vtime_account_kernel()
352 acct->hardirq_time += stime; in vtime_account_kernel()
354 acct->softirq_time += stime; in vtime_account_kernel()
356 acct->stime += stime; in vtime_account_kernel()
359 acct->stime_scaled += stime_scaled; in vtime_account_kernel()
371 acct->idle_time += stime + steal_time; in vtime_account_idle()
378 if (acct->utime_scaled) in vtime_flush_scaled()
379 tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled); in vtime_flush_scaled()
380 if (acct->stime_scaled) in vtime_flush_scaled()
381 tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled); in vtime_flush_scaled()
383 acct->utime_scaled = 0; in vtime_flush_scaled()
384 acct->utime_sspurr = 0; in vtime_flush_scaled()
385 acct->stime_scaled = 0; in vtime_flush_scaled()
394 * get_paca()->user_time_scaled is up to date.
400 if (acct->utime) in vtime_flush()
401 account_user_time(tsk, cputime_to_nsecs(acct->utime)); in vtime_flush()
403 if (acct->gtime) in vtime_flush()
404 account_guest_time(tsk, cputime_to_nsecs(acct->gtime)); in vtime_flush()
406 if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) { in vtime_flush()
407 account_steal_time(cputime_to_nsecs(acct->steal_time)); in vtime_flush()
408 acct->steal_time = 0; in vtime_flush()
411 if (acct->idle_time) in vtime_flush()
412 account_idle_time(cputime_to_nsecs(acct->idle_time)); in vtime_flush()
414 if (acct->stime) in vtime_flush()
415 account_system_index_time(tsk, cputime_to_nsecs(acct->stime), in vtime_flush()
418 if (acct->hardirq_time) in vtime_flush()
419 account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time), in vtime_flush()
421 if (acct->softirq_time) in vtime_flush()
422 account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time), in vtime_flush()
427 acct->utime = 0; in vtime_flush()
428 acct->gtime = 0; in vtime_flush()
429 acct->idle_time = 0; in vtime_flush()
430 acct->stime = 0; in vtime_flush()
431 acct->hardirq_time = 0; in vtime_flush()
432 acct->softirq_time = 0; in vtime_flush()
453 while (mftb() - start < loops) in __delay()
472 return regs->link; in profile_pc()
482 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
509 #else /* 32-bit */
522 * 64-bit code that uses irq soft-mask can just cause an immediate in arch_irq_work_raise()
528 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on) in arch_irq_work_raise()
546 * timer_interrupt - gets called when the decrementer overflows,
576 /* Conditionally hard-enable interrupts now that the DEC has been in timer_interrupt()
599 if (evt->event_handler) in timer_interrupt()
600 evt->event_handler(evt); in timer_interrupt()
603 now = *next_tb - now; in timer_interrupt()
670 * Scheduler clock - returns current time in nanosec units.
674 * are 64-bit unsigned numbers.
678 return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; in sched_clock()
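sched_clock() at 678 is a pure fixed-point conversion: tb_to_ns_scale is a 0.64 binary fraction, mulhdu() (a powerpc assembly helper) returns the high 64 bits of a 64x64-bit product, and tb_to_ns_shift restores the integer part. The same arithmetic, sketched portably with the compiler's 128-bit type:

/* What mulhdu() computes: high 64 bits of a 64x64-bit product. */
static inline unsigned long long mulhdu_equiv(unsigned long long a,
					      unsigned long long b)
{
	return (unsigned long long)(((__uint128_t)a * b) >> 64);
}

/* Ticks to nanoseconds, mirroring the expression in sched_clock(). */
static unsigned long long ticks_to_ns(unsigned long long ticks,
				      unsigned long long scale_0_64,
				      int shift)
{
	return mulhdu_equiv(ticks, scale_0_64) << shift;
}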
685 * Running clock - attempts to give a view of time passing for a virtualised
693 * timebase into the VTB when it takes a guest off the CPU, reading the in running_clock()
701 return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; in running_clock()
709 return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL]; in running_clock()
719 /* The cpu node should have timebase and clock frequency properties */ in get_freq()
745 * The watchdog may have already been enabled by u-boot. So leave in start_cpu_decrementer()
758 if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) && in generic_calibrate_decr()
759 !get_freq("timebase-frequency", 1, &ppc_tb_freq)) { in generic_calibrate_decr()
761 printk(KERN_ERR "WARNING: Estimating decrementer frequency " in generic_calibrate_decr()
767 if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) && in generic_calibrate_decr()
768 !get_freq("clock-frequency", 1, &ppc_proc_freq)) { in generic_calibrate_decr()
770 printk(KERN_ERR "WARNING: Estimating processor frequency " in generic_calibrate_decr()
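generic_calibrate_decr() at 758-770 prefers the two-cell ibm,extended-* properties and falls back to the standard one-cell ones, warning if neither is present. A sketch of what a get_freq()-style helper can look like, assuming it reads a cells-wide big-endian value from the first cpu node using the standard OF accessors (the exact implementation in this file may differ):

#include <linux/of.h>

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should carry the timebase/clock frequency properties. */
	cpu = of_find_node_by_type(NULL, "cpu");
	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);	/* 1 or 2 cells */
		}
		of_node_put(cpu);
	}
	return found;
}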
780 return -ENODEV; in update_persistent_clock64()
792 ts->tv_nsec = 0; in __read_persistent_clock()
801 ts->tv_sec = ppc_md.get_boot_time() - timezone_offset; in __read_persistent_clock()
806 ts->tv_sec = 0; in __read_persistent_clock()
811 ts->tv_sec = rtc_tm_to_time64(&tm); in __read_persistent_clock()
819 if (ts->tv_sec < 0) { in read_persistent_clock64()
820 ts->tv_sec = 0; in read_persistent_clock64()
821 ts->tv_nsec = 0; in read_persistent_clock64()
836 struct clocksource *clock = tk->tkr_mono.clock; in update_vsyscall()
837 u32 mult = tk->tkr_mono.mult; in update_vsyscall()
838 u32 shift = tk->tkr_mono.shift; in update_vsyscall()
839 u64 cycle_last = tk->tkr_mono.cycle_last; in update_vsyscall()
846 xt.tv_sec = tk->xtime_sec; in update_vsyscall()
847 xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); in update_vsyscall()
850 ++vdso_data->tb_update_count; in update_vsyscall()
855 * 0.64 fixed-point fraction. in update_vsyscall()
857 * (as long as the timebase frequency is >= 1.049 MHz) in update_vsyscall()
860 * For a shift of 24 the error is about 0.5e-9, or about 0.5ns in update_vsyscall()
862 * For high frequency clocks such as the 512MHz timebase clock in update_vsyscall()
865 * (295147905179 ~= 2^(20+64-16) / 1e9) and then do the in update_vsyscall()
868 * the error is only about 1.2e-12, or 0.7ns over 10 minutes). in update_vsyscall()
870 if (mult <= 62500000 && clock->shift >= 16) in update_vsyscall()
871 new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16); in update_vsyscall()
873 new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift); in update_vsyscall()
876 * Compute the fractional second in units of 2^-32 seconds. in update_vsyscall()
877 * The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift in update_vsyscall()
879 * it in units of 2^-32 seconds. in update_vsyscall()
881 * generates shift values in the range 0 - 32. in update_vsyscall()
883 frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift); in update_vsyscall()
888 * stamp_xsec is in units of 2^-20 seconds. in update_vsyscall()
891 new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC; in update_vsyscall()
902 vdso_data->tb_orig_stamp = cycle_last; in update_vsyscall()
903 vdso_data->stamp_xsec = new_stamp_xsec; in update_vsyscall()
904 vdso_data->tb_to_xs = new_tb_to_xs; in update_vsyscall()
905 vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec; in update_vsyscall()
906 vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec; in update_vsyscall()
907 vdso_data->stamp_xtime_sec = xt.tv_sec; in update_vsyscall()
908 vdso_data->stamp_xtime_nsec = xt.tv_nsec; in update_vsyscall()
909 vdso_data->stamp_sec_fraction = frac_sec; in update_vsyscall()
910 vdso_data->hrtimer_res = hrtimer_resolution; in update_vsyscall()
912 ++(vdso_data->tb_update_count); in update_vsyscall()
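The two magic constants at 871 and 873 follow from the comment above them: an "xsec" is 2^-20 s, tb_to_xs is a 0.64 fraction, and mult/shift convert ticks to nanoseconds, so the combined scale is 2^(20+64) / 1e9, pre-shifted by 16 in the high-precision branch. A standalone arithmetic check (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
	__uint128_t one = 1;

	/* High-precision branch: 2^(20+64-16) / 1e9. */
	unsigned long long c1 = (unsigned long long)((one << 68) / 1000000000);
	/* General branch: 2^(20+64) / 1e9; the source rounds up. */
	unsigned long long c2 = (unsigned long long)((one << 84) / 1000000000);

	printf("%llu\n", c1);	/* 295147905179, as at 871 */
	printf("%llu\n", c2);	/* 19342813113834066; 873 uses ...067 (rounded) */
	return 0;
}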
917 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; in update_vsyscall_tz()
918 vdso_data->tz_dsttime = sys_tz.tz_dsttime; in update_vsyscall_tz()
927 clock->name); in clocksource_init()
932 clock->name, clock->mult, clock->shift); in clocksource_init()
959 dec->cpumask = cpumask_of(cpu); in register_decrementer_clockevent()
964 dec->name, dec->mult, dec->shift, cpu); in register_decrementer_clockevent()
967 decrementer_clockevent.mult = dec->mult; in register_decrementer_clockevent()
968 decrementer_clockevent.shift = dec->shift; in register_decrementer_clockevent()
998 if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) { in set_decrementer_max()
1000 pr_warn("time_init: firmware supplied invalid ibm,dec-bits"); in set_decrementer_max()
1005 decrementer_max = (1ul << (bits - 1)) - 1; in set_decrementer_max()
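The expression at 1005 reflects that the decrementer is a signed register: it raises its exception when the value goes negative, so only bits - 1 bits of positive range are programmable. A quick standalone check for the common 32-bit case:

#include <stdio.h>

int main(void)
{
	unsigned int bits = 32;	/* example ibm,dec-bits value */
	unsigned long max = (1ul << (bits - 1)) - 1;

	printf("decrementer_max = %#lx\n", max);	/* 0x7fffffff */
	return 0;
}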
1041 /* Normal PowerPC with timebase register */ in time_init()
1043 printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", in time_init()
1045 printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n", in time_init()
1056 * which is the timebase frequency. in time_init()
1058 * the 128-bit result as a 64.64 fixed-point number. in time_init()
1071 /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ in time_init()
1076 sys_tz.tz_minuteswest = -timezone_offset / 60; in time_init()
1080 vdso_data->tb_update_count = 0; in time_init()
1081 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; in time_init()
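The comment at 1056-1058 describes forming 1e9 * 2^64 / tb_ticks_per_sec as a 64.64 fixed-point number; sched_clock() then needs it as a 0.64 scale plus a shift count. A sketch of that reduction, assuming the 128-bit ratio arrives as 64-bit high/low halves (generic names, not necessarily the exact loop in this file):

/* Reduce a 64.64 ratio (hi.lo) to a 0.64 scale and shift, so that
 * ns = mulh(ticks, scale) << shift, matching sched_clock()'s form. */
static void reduce_scale(unsigned long long hi, unsigned long long lo,
			 unsigned long long *scale, int *shift)
{
	int s = 0;

	while (hi != 0) {	/* shift right until only a fraction remains */
		lo = (lo >> 1) | (hi << 63);
		hi >>= 1;
		s++;
	}
	*scale = lo;
	*shift = s;
}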
1103 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
1119 ra = ((u64)(a - (w * divisor)) << 32) + b; in div128_by_32()
1130 dr->result_high = ((u64)w << 32) + x; in div128_by_32()
1131 dr->result_low = ((u64)y << 32) + z; in div128_by_32()
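div128_by_32() at 1119-1131 is schoolbook long division in 32-bit digits: w, x, y, z are the four quotient digits and ra carries the running remainder into the next digit. A compact reference version using the compiler's 128-bit type, handy for cross-checking the digit-by-digit version (generic code, not a drop-in replacement):

struct div_result_ref {
	unsigned long long result_high, result_low;
};

static unsigned int div128_by_32_ref(unsigned long long dividend_high,
				     unsigned long long dividend_low,
				     unsigned int divisor,
				     struct div_result_ref *dr)
{
	__uint128_t n = ((__uint128_t)dividend_high << 64) | dividend_low;
	__uint128_t q = n / divisor;

	dr->result_high = (unsigned long long)(q >> 64);
	dr->result_low	= (unsigned long long)q;
	return (unsigned int)(n % divisor);	/* remainder */
}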
1135 /* We don't need to calibrate delay, we use the CPU timebase for that */
1154 return -EOPNOTSUPP; in rtc_generic_set_time()
1157 return -EOPNOTSUPP; in rtc_generic_set_time()
1172 return -ENODEV; in rtc_init()
1174 pdev = platform_device_register_data(NULL, "rtc-generic", -1, in rtc_init()