• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
3  * Copyright (c) 2017, Intel Corporation.
4  * All rights reserved.
5  *
6  * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/kernel.h>
21 #include <linux/init.h>
22 #include <linux/topology.h>
23 #include <linux/workqueue.h>
24 #include <linux/cpuhotplug.h>
25 #include <linux/cpufeature.h>
26 #include <asm/cpu_device_id.h>
27 #include <asm/intel-family.h>
28 
29 #define MSR_OC_MAILBOX			0x150
30 #define MSR_OC_MAILBOX_CMD_OFFSET	32
31 #define MSR_OC_MAILBOX_RSP_OFFSET	32
32 #define MSR_OC_MAILBOX_BUSY_BIT		63
33 #define OC_MAILBOX_FC_CONTROL_CMD	0x1C
34 
/*
 * Typical latency to get a mailbox response is ~3 us; it takes a further
 * ~3 us to process the mailbox read after issuing the mailbox write on a
 * Broadwell 3.4 GHz system. So most of the time the first mailbox read
 * should already have the response, but retry twice to cover boundary
 * cases.
 */
41 #define OC_MAILBOX_RETRY_COUNT		2
42 
get_oc_core_priority(unsigned int cpu)43 static int get_oc_core_priority(unsigned int cpu)
44 {
45 	u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
46 	int ret, i;
47 
48 	/* Issue favored core read command */
49 	value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
50 	/* Set the busy bit to indicate OS is trying to issue command */
51 	value |=  BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
52 	ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
53 	if (ret) {
54 		pr_debug("cpu %d OC mailbox write failed\n", cpu);
55 		return ret;
56 	}
57 
58 	for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
59 		ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
60 		if (ret) {
61 			pr_debug("cpu %d OC mailbox read failed\n", cpu);
62 			break;
63 		}
64 
65 		if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
66 			pr_debug("cpu %d OC mailbox still processing\n", cpu);
67 			ret = -EBUSY;
68 			continue;
69 		}
70 
71 		if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
72 			pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
73 			ret = -ENXIO;
74 			break;
75 		}
76 
77 		ret = value & 0xff;
78 		pr_debug("cpu %d max_ratio %d\n", cpu, ret);
79 		break;
80 	}
81 
82 	return ret;
83 }
84 
/*
 * Deferred enabling of ITMT scheduler support.
 *
 * The work item is needed to avoid CPU hotplug locking issues:
 * itmt_legacy_cpu_online() runs from the CPU online callback, so it
 * cannot call sched_set_itmt_support() directly, as that function
 * acquires hotplug locks in its path.
 */
static void itmt_legacy_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}
95 
96 static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
97 
itmt_legacy_cpu_online(unsigned int cpu)98 static int itmt_legacy_cpu_online(unsigned int cpu)
99 {
100 	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
101 	int priority;
102 
103 	priority = get_oc_core_priority(cpu);
104 	if (priority < 0)
105 		return 0;
106 
107 	sched_set_itmt_core_prio(priority, cpu);
108 
109 	/* Enable ITMT feature when a core with different priority is found */
110 	if (max_highest_perf <= min_highest_perf) {
111 		if (priority > max_highest_perf)
112 			max_highest_perf = priority;
113 
114 		if (priority < min_highest_perf)
115 			min_highest_perf = priority;
116 
117 		if (max_highest_perf > min_highest_perf)
118 			schedule_work(&sched_itmt_work);
119 	}
120 
121 	return 0;
122 }
123 
/* Family 6 models supporting legacy (non-HWP) Turbo Boost Max 3.0. */
#define ICPU(model)     { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
	ICPU(INTEL_FAM6_BROADWELL_X),
	ICPU(INTEL_FAM6_SKYLAKE_X),
	{}
};
131 
itmt_legacy_init(void)132 static int __init itmt_legacy_init(void)
133 {
134 	const struct x86_cpu_id *id;
135 	int ret;
136 
137 	id = x86_match_cpu(itmt_legacy_cpu_ids);
138 	if (!id)
139 		return -ENODEV;
140 
141 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
142 				"platform/x86/turbo_max_3:online",
143 				itmt_legacy_cpu_online,	NULL);
144 	if (ret < 0)
145 		return ret;
146 
147 	return 0;
148 }
149 late_initcall(itmt_legacy_init)
150