• Home
  • Raw
  • Download

Lines Matching +full:cpu +full:- +full:thermal

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Thermal throttle event support code (such as syslog messaging and rate
6 * This allows consistent reporting of CPU thermal throttle events.
8 * Maintains a counter in /sys that keeps track of the number of thermal
9 * events, such that the user knows how bad the thermal problem might be
26 #include <linux/cpu.h>
29 #include <asm/thermal.h>
38 /* How long to wait between reporting thermal events */
45 * struct _thermal_state - Represent the current thermal event state
51 * @count: Stores the current running count for thermal
53 * @last_count: Stores the previous running count for thermal
55 * @max_time_ms: This shows the maximum amount of time CPU was
56 * in throttled state for a single thermal
58 * @total_time_ms: This is a cumulative time during which CPU was
61 * This is used for the purpose of rate-control.
72 * @baseline_temp: Temperature at which thermal threshold high
77 * This structure is used to represent data related to thermal state for a CPU.
78 * There is a separate storage for core and package level for each CPU.
142 unsigned int cpu = dev->id; \
145 preempt_disable(); /* CPU hotplug */ \
146 if (cpu_online(cpu)) { \
148 per_cpu(thermal_state, cpu).event.name); \
206 * "Table 2-2. IA-32 Architectural MSRs", MSR 0x19C in thermal_intr_init_core_clear_mask()
217 * Bit 7 and 9: Thermal Threshold #1 and #2 log in thermal_intr_init_core_clear_mask()
242 * "Table 2-2. IA-32 Architectural MSRs", MSR 0x1B1 in thermal_intr_init_pkg_clear_mask()
251 * Intel SDM Volume 2A: Thermal and Power Management Leaf in thermal_intr_init_pkg_clear_mask()
259 * Clear the bits in package thermal status register for bit = 1
308 get_therm_status(state->level, &hot, &temp); in throttle_active_work()
310 if (!hot && temp > state->baseline_temp) { in throttle_active_work()
311 if (state->rate_control_active) in throttle_active_work()
312 pr_info("CPU%d: %s temperature/speed normal (total events = %lu)\n", in throttle_active_work()
314 state->level == CORE_LEVEL ? "Core" : "Package", in throttle_active_work()
315 state->count); in throttle_active_work()
317 state->rate_control_active = false; in throttle_active_work()
321 if (time_before64(now, state->next_check) && in throttle_active_work()
322 state->rate_control_active) in throttle_active_work()
325 state->next_check = now + CHECK_INTERVAL; in throttle_active_work()
327 if (state->count != state->last_count) { in throttle_active_work()
328 /* There was one new thermal interrupt */ in throttle_active_work()
329 state->last_count = state->count; in throttle_active_work()
330 state->average = 0; in throttle_active_work()
331 state->sample_count = 0; in throttle_active_work()
332 state->sample_index = 0; in throttle_active_work()
335 state->temp_samples[state->sample_index] = temp; in throttle_active_work()
336 state->sample_count++; in throttle_active_work()
337 state->sample_index = (state->sample_index + 1) % ARRAY_SIZE(state->temp_samples); in throttle_active_work()
338 if (state->sample_count < ARRAY_SIZE(state->temp_samples)) in throttle_active_work()
342 for (i = 0; i < ARRAY_SIZE(state->temp_samples); ++i) in throttle_active_work()
343 avg += state->temp_samples[i]; in throttle_active_work()
345 avg /= ARRAY_SIZE(state->temp_samples); in throttle_active_work()
347 if (state->average > avg) { in throttle_active_work()
348 pr_warn("CPU%d: %s temperature is above threshold, cpu clock is throttled (total events = %lu)\n", in throttle_active_work()
350 state->level == CORE_LEVEL ? "Core" : "Package", in throttle_active_work()
351 state->count); in throttle_active_work()
352 state->rate_control_active = true; in throttle_active_work()
355 state->average = avg; in throttle_active_work()
358 thermal_clear_package_intr_status(state->level, THERM_STATUS_PROCHOT_LOG); in throttle_active_work()
359 schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL); in throttle_active_work()
363 * therm_throt_process - Process thermal throttling event from interrupt
365 * thermal interrupt normally gets called both when the thermal
368 * This function is called by the thermal interrupt after the
384 state = &pstate->core_throttle; in therm_throt_process()
386 state = &pstate->core_power_limit; in therm_throt_process()
391 state = &pstate->package_throttle; in therm_throt_process()
393 state = &pstate->package_power_limit; in therm_throt_process()
399 old_event = state->new_event; in therm_throt_process()
400 state->new_event = new_event; in therm_throt_process()
403 state->count++; in therm_throt_process()
408 if (new_event && !state->last_interrupt_time) { in therm_throt_process()
412 get_therm_status(state->level, &hot, &temp); in therm_throt_process()
421 state->baseline_temp = temp; in therm_throt_process()
422 state->last_interrupt_time = now; in therm_throt_process()
423 schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL); in therm_throt_process()
424 } else if (old_event && state->last_interrupt_time) { in therm_throt_process()
427 throttle_time = jiffies_delta_to_msecs(now - state->last_interrupt_time); in therm_throt_process()
428 if (throttle_time > state->max_time_ms) in therm_throt_process()
429 state->max_time_ms = throttle_time; in therm_throt_process()
430 state->total_time_ms += throttle_time; in therm_throt_process()
431 state->last_interrupt_time = 0; in therm_throt_process()
443 state = (event == 0) ? &pstate->pkg_thresh0 : in thresh_event_valid()
444 &pstate->pkg_thresh1; in thresh_event_valid()
446 state = (event == 0) ? &pstate->core_thresh0 : in thresh_event_valid()
447 &pstate->core_thresh1; in thresh_event_valid()
449 if (time_before64(now, state->next_check)) in thresh_event_valid()
452 state->next_check = now + CHECK_INTERVAL; in thresh_event_valid()
467 /* Add/Remove thermal_throttle interface for CPU device: */
468 static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu) in thermal_throttle_add_dev() argument
471 struct cpuinfo_x86 *c = &cpu_data(cpu); in thermal_throttle_add_dev()
473 err = sysfs_create_group(&dev->kobj, &thermal_attr_group); in thermal_throttle_add_dev()
478 err = sysfs_add_file_to_group(&dev->kobj, in thermal_throttle_add_dev()
486 err = sysfs_add_file_to_group(&dev->kobj, in thermal_throttle_add_dev()
492 err = sysfs_add_file_to_group(&dev->kobj, in thermal_throttle_add_dev()
498 err = sysfs_add_file_to_group(&dev->kobj, in thermal_throttle_add_dev()
505 err = sysfs_add_file_to_group(&dev->kobj, in thermal_throttle_add_dev()
516 sysfs_remove_group(&dev->kobj, &thermal_attr_group); in thermal_throttle_add_dev()
523 sysfs_remove_group(&dev->kobj, &thermal_attr_group); in thermal_throttle_remove_dev()
526 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
527 static int thermal_throttle_online(unsigned int cpu) in thermal_throttle_online() argument
529 struct thermal_state *state = &per_cpu(thermal_state, cpu); in thermal_throttle_online()
530 struct device *dev = get_cpu_device(cpu); in thermal_throttle_online()
533 state->package_throttle.level = PACKAGE_LEVEL; in thermal_throttle_online()
534 state->core_throttle.level = CORE_LEVEL; in thermal_throttle_online()
536 INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work); in thermal_throttle_online()
537 INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work); in thermal_throttle_online()
540 * The first CPU coming online will enable the HFI. Usually this causes in thermal_throttle_online()
541 * hardware to issue an HFI thermal interrupt. Such interrupt will reach in thermal_throttle_online()
542 * the CPU once we enable the thermal vector in the local APIC. in thermal_throttle_online()
544 intel_hfi_online(cpu); in thermal_throttle_online()
546 /* Unmask the thermal vector after the above workqueues are initialized. */ in thermal_throttle_online()
550 return thermal_throttle_add_dev(dev, cpu); in thermal_throttle_online()
553 static int thermal_throttle_offline(unsigned int cpu) in thermal_throttle_offline() argument
555 struct thermal_state *state = &per_cpu(thermal_state, cpu); in thermal_throttle_offline()
556 struct device *dev = get_cpu_device(cpu); in thermal_throttle_offline()
559 	/* Mask the thermal vector before draining eventually pending work */  in thermal_throttle_offline()
563 intel_hfi_offline(cpu); in thermal_throttle_offline()
565 cancel_delayed_work_sync(&state->package_throttle.therm_work); in thermal_throttle_offline()
566 cancel_delayed_work_sync(&state->core_throttle.therm_work); in thermal_throttle_offline()
568 state->package_throttle.rate_control_active = false; in thermal_throttle_offline()
569 state->core_throttle.rate_control_active = false; in thermal_throttle_offline()
649 /* Thermal transition interrupt handler */
659 	/* Check for violation of core thermal thresholds */  in intel_thermal_interrupt()
673 /* check violations of package thermal thresholds */ in intel_thermal_interrupt()
690 /* Thermal monitoring depends on APIC, ACPI and clock modulation */
708 * This function is only called on boot CPU. Save the init thermal in therm_lvt_init()
709 * LVT value on BSP and use that value to restore APs' thermal LVT in therm_lvt_init()
718 unsigned int cpu = smp_processor_id(); in intel_init_thermal() local
734 * The initial value of thermal LVT entries on all APs always reads in intel_init_thermal()
735 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI in intel_init_thermal()
738 * If BIOS takes over the thermal interrupt and sets its interrupt in intel_init_thermal()
749 pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu); in intel_init_thermal()
755 if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) { in intel_init_thermal()
763 /* We'll mask the thermal vector in the lapic till we're ready: */ in intel_init_thermal()
810 pr_info_once("CPU0: Thermal monitoring enabled (%s)\n", in intel_init_thermal()
813 /* enable thermal throttle processing */ in intel_init_thermal()