• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Uncore Frequency Setting
4  * Copyright (c) 2019, Intel Corporation.
5  * All rights reserved.
6  *
7  * Provide interface to set MSR 620 at a granularity of per die. On CPU online,
8  * one control CPU is identified per die to read/write limit. This control CPU
9  * is changed, if the CPU state is changed to offline. When the last CPU is
10  * offline in a die then remove the sysfs object for that die.
11  * The majority of actual code is related to sysfs create and read/write
12  * attributes.
13  *
14  * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
15  */
16 
17 #include <linux/cpu.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/suspend.h>
21 #include <asm/cpu_device_id.h>
22 #include <asm/intel-family.h>
23 
24 #define MSR_UNCORE_RATIO_LIMIT			0x620
25 #define UNCORE_FREQ_KHZ_MULTIPLIER		100000
26 
/**
 * struct uncore_data -	Encapsulate all uncore data
 * @kobj:		Kobject for the per die sysfs directory; attributes in
 *			uncore_attrs are created under it
 * @kobj_unregister:	Completed by the kobject release callback; waited on
 *			in module exit before freeing this instance
 * @stored_uncore_data:	Last user changed MSR 620 value, which will be restored
 *			on system resume.
 * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init
 * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init
 * @control_cpu:	Designated CPU for a die to read/write; negative when
 *			no CPU of the die is online
 * @valid:		Mark the data valid/invalid
 *
 * This structure is used to encapsulate all data related to uncore sysfs
 * settings for a die/package.
 */
struct uncore_data {
	struct kobject kobj;
	struct completion kobj_unregister;
	u64 stored_uncore_data;
	u32 initial_min_freq_khz;
	u32 initial_max_freq_khz;
	int control_cpu;
	bool valid;
};
48 
/* Map an embedded kobject back to its containing struct uncore_data */
#define to_uncore_data(a) container_of(a, struct uncore_data, kobj)

/* Max instances for uncore data, one for each die */
static int uncore_max_entries __read_mostly;
/* Storage for uncore data for all instances */
static struct uncore_data *uncore_instances;
/* Root of the all uncore sysfs kobjs */
static struct kobject *uncore_root_kobj;
/* Stores the CPU mask of the target CPUs to use during uncore read/write */
static cpumask_t uncore_cpu_mask;
/* CPU online callback register instance */
static enum cpuhp_state uncore_hp_state __read_mostly;
/* Mutex to control all mutual exclusions */
static DEFINE_MUTEX(uncore_lock);
63 
/* Sysfs attribute with raw kobject based show/store callbacks */
struct uncore_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			 struct attribute *attr, const char *c, ssize_t count);
};
71 
/* Declare a 0444 attribute named _name, backed by show_##_name only */
#define define_one_uncore_ro(_name) \
static struct uncore_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

/* Declare a 0644 attribute named _name with show_##_name/store_##_name */
#define define_one_uncore_rw(_name) \
static struct uncore_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

/*
 * Generate a show callback printing the given struct uncore_data member
 * as "%u\n", and declare the matching read-only attribute.
 */
#define show_uncore_data(member_name)					\
	static ssize_t show_##member_name(struct kobject *kobj,         \
					  struct attribute *attr,	\
					  char *buf)			\
	{                                                               \
		struct uncore_data *data = to_uncore_data(kobj);	\
		return scnprintf(buf, PAGE_SIZE, "%u\n",		\
				 data->member_name);			\
	}								\
	define_one_uncore_ro(member_name)

show_uncore_data(initial_min_freq_khz);
show_uncore_data(initial_max_freq_khz);
93 
94 /* Common function to read MSR 0x620 and read min/max */
uncore_read_ratio(struct uncore_data * data,unsigned int * min,unsigned int * max)95 static int uncore_read_ratio(struct uncore_data *data, unsigned int *min,
96 			     unsigned int *max)
97 {
98 	u64 cap;
99 	int ret;
100 
101 	if (data->control_cpu < 0)
102 		return -ENXIO;
103 
104 	ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
105 	if (ret)
106 		return ret;
107 
108 	*max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
109 	*min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
110 
111 	return 0;
112 }
113 
114 /* Common function to set min/max ratios to be used by sysfs callbacks */
uncore_write_ratio(struct uncore_data * data,unsigned int input,int set_max)115 static int uncore_write_ratio(struct uncore_data *data, unsigned int input,
116 			      int set_max)
117 {
118 	int ret;
119 	u64 cap;
120 
121 	mutex_lock(&uncore_lock);
122 
123 	if (data->control_cpu < 0) {
124 		ret = -ENXIO;
125 		goto finish_write;
126 	}
127 
128 	input /= UNCORE_FREQ_KHZ_MULTIPLIER;
129 	if (!input || input > 0x7F) {
130 		ret = -EINVAL;
131 		goto finish_write;
132 	}
133 
134 	ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
135 	if (ret)
136 		goto finish_write;
137 
138 	if (set_max) {
139 		cap &= ~0x7F;
140 		cap |= input;
141 	} else  {
142 		cap &= ~GENMASK(14, 8);
143 		cap |= (input << 8);
144 	}
145 
146 	ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
147 	if (ret)
148 		goto finish_write;
149 
150 	data->stored_uncore_data = cap;
151 
152 finish_write:
153 	mutex_unlock(&uncore_lock);
154 
155 	return ret;
156 }
157 
/*
 * Common store callback for min/max_freq_khz. Parses the user buffer as a
 * decimal KHz value and programs the selected limit.
 * @min_max: passed through to uncore_write_ratio(); non-zero selects max.
 *
 * Returns @count on success or a negative error code, so a failed MSR
 * write is reported to userspace instead of being silently dropped.
 */
static ssize_t store_min_max_freq_khz(struct kobject *kobj,
				      struct attribute *attr,
				      const char *buf, ssize_t count,
				      int min_max)
{
	struct uncore_data *data = to_uncore_data(kobj);
	unsigned int input;
	int ret;

	if (kstrtouint(buf, 10, &input))
		return -EINVAL;

	ret = uncore_write_ratio(data, input, min_max);
	if (ret)
		return ret;

	return count;
}
173 
show_min_max_freq_khz(struct kobject * kobj,struct attribute * attr,char * buf,int min_max)174 static ssize_t show_min_max_freq_khz(struct kobject *kobj,
175 				     struct attribute *attr,
176 				     char *buf, int min_max)
177 {
178 	struct uncore_data *data = to_uncore_data(kobj);
179 	unsigned int min, max;
180 	int ret;
181 
182 	mutex_lock(&uncore_lock);
183 	ret = uncore_read_ratio(data, &min, &max);
184 	mutex_unlock(&uncore_lock);
185 	if (ret)
186 		return ret;
187 
188 	if (min_max)
189 		return sprintf(buf, "%u\n", max);
190 
191 	return sprintf(buf, "%u\n", min);
192 }
193 
/* Generate a store callback forwarding to store_min_max_freq_khz() */
#define store_uncore_min_max(name, min_max)				\
	static ssize_t store_##name(struct kobject *kobj,		\
				    struct attribute *attr,		\
				    const char *buf, ssize_t count)	\
	{                                                               \
									\
		return store_min_max_freq_khz(kobj, attr, buf, count,	\
					      min_max);			\
	}

/* Generate a show callback forwarding to show_min_max_freq_khz() */
#define show_uncore_min_max(name, min_max)				\
	static ssize_t show_##name(struct kobject *kobj,		\
				   struct attribute *attr, char *buf)	\
	{                                                               \
									\
		return show_min_max_freq_khz(kobj, attr, buf, min_max); \
	}

/* min_max selector: 0 = min limit, 1 = max limit */
store_uncore_min_max(min_freq_khz, 0);
store_uncore_min_max(max_freq_khz, 1);

show_uncore_min_max(min_freq_khz, 0);
show_uncore_min_max(max_freq_khz, 1);

define_one_uncore_rw(min_freq_khz);
define_one_uncore_rw(max_freq_khz);
220 
/* Attributes created under every per die sysfs directory */
static struct attribute *uncore_attrs[] = {
	&initial_min_freq_khz.attr,
	&initial_max_freq_khz.attr,
	&max_freq_khz.attr,
	&min_freq_khz.attr,
	NULL
};
228 
uncore_sysfs_entry_release(struct kobject * kobj)229 static void uncore_sysfs_entry_release(struct kobject *kobj)
230 {
231 	struct uncore_data *data = to_uncore_data(kobj);
232 
233 	complete(&data->kobj_unregister);
234 }
235 
/* Kobject type for per die entries: default attrs plus release hook */
static struct kobj_type uncore_ktype = {
	.release = uncore_sysfs_entry_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uncore_attrs,
};
241 
242 /* Caller provides protection */
uncore_get_instance(unsigned int cpu)243 static struct uncore_data *uncore_get_instance(unsigned int cpu)
244 {
245 	int id = topology_logical_die_id(cpu);
246 
247 	if (id >= 0 && id < uncore_max_entries)
248 		return &uncore_instances[id];
249 
250 	return NULL;
251 }
252 
/*
 * Designate @cpu as the control CPU for its die, creating the die's sysfs
 * entry on first use. Safe to call from the hotplug callbacks.
 */
static void uncore_add_die_entry(int cpu)
{
	struct uncore_data *data;

	mutex_lock(&uncore_lock);
	data = uncore_get_instance(cpu);
	if (!data) {
		mutex_unlock(&uncore_lock);
		return;
	}

	if (data->valid) {
		/* control cpu changed */
		data->control_cpu = cpu;
	} else {
		char str[64];
		int ret;

		/* First CPU seen for this die: build the sysfs entry */
		memset(data, 0, sizeof(*data));
		sprintf(str, "package_%02d_die_%02d",
			topology_physical_package_id(cpu),
			topology_die_id(cpu));

		/*
		 * Snapshot the current limits; on read failure both initial
		 * values stay 0 from the memset above.
		 * NOTE(review): control_cpu is 0 here after the memset, so
		 * this read targets CPU 0, not @cpu — confirm intended.
		 */
		uncore_read_ratio(data, &data->initial_min_freq_khz,
				  &data->initial_max_freq_khz);

		init_completion(&data->kobj_unregister);

		ret = kobject_init_and_add(&data->kobj, &uncore_ktype,
					   uncore_root_kobj, str);
		if (!ret) {
			/* Entry is live only after the kobject was added */
			data->control_cpu = cpu;
			data->valid = true;
		}
	}
	mutex_unlock(&uncore_lock);
}
290 
291 /* Last CPU in this die is offline, make control cpu invalid */
uncore_remove_die_entry(int cpu)292 static void uncore_remove_die_entry(int cpu)
293 {
294 	struct uncore_data *data;
295 
296 	mutex_lock(&uncore_lock);
297 	data = uncore_get_instance(cpu);
298 	if (data)
299 		data->control_cpu = -1;
300 	mutex_unlock(&uncore_lock);
301 }
302 
uncore_event_cpu_online(unsigned int cpu)303 static int uncore_event_cpu_online(unsigned int cpu)
304 {
305 	int target;
306 
307 	/* Check if there is an online cpu in the package for uncore MSR */
308 	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
309 	if (target < nr_cpu_ids)
310 		return 0;
311 
312 	/* Use this CPU on this die as a control CPU */
313 	cpumask_set_cpu(cpu, &uncore_cpu_mask);
314 	uncore_add_die_entry(cpu);
315 
316 	return 0;
317 }
318 
uncore_event_cpu_offline(unsigned int cpu)319 static int uncore_event_cpu_offline(unsigned int cpu)
320 {
321 	int target;
322 
323 	/* Check if existing cpu is used for uncore MSRs */
324 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
325 		return 0;
326 
327 	/* Find a new cpu to set uncore MSR */
328 	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
329 
330 	if (target < nr_cpu_ids) {
331 		cpumask_set_cpu(target, &uncore_cpu_mask);
332 		uncore_add_die_entry(target);
333 	} else {
334 		uncore_remove_die_entry(cpu);
335 	}
336 
337 	return 0;
338 }
339 
/*
 * PM notifier: after resume from suspend/hibernation, re-program each
 * die's last user-set MSR_UNCORE_RATIO_LIMIT value via its control CPU.
 * NOTE(review): returns a negative errno on write failure; notifier
 * callbacks conventionally return NOTIFY_* values — confirm intended.
 */
static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
			    void *_unused)
{
	int cpu;

	switch (mode) {
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		for_each_cpu(cpu, &uncore_cpu_mask) {
			struct uncore_data *data;
			int ret;

			data = uncore_get_instance(cpu);
			/* Skip dies where the user never wrote a limit */
			if (!data || !data->valid || !data->stored_uncore_data)
				continue;

			ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT,
					    data->stored_uncore_data);
			if (ret)
				return ret;
		}
		break;
	default:
		break;
	}
	return 0;
}
368 
static struct notifier_block uncore_pm_nb = {
	.notifier_call = uncore_pm_notify,
};

/* CPU models this driver supports; match data is unused */
static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,	NULL),
	{}
};
382 
/*
 * Module init: match a supported CPU, allocate one instance per possible
 * die, create the sysfs root, then register hotplug and PM callbacks.
 * Failures unwind in reverse order through the labels below.
 */
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	id = x86_match_cpu(intel_uncore_cpu_ids);
	if (!id)
		return -ENODEV;

	/* One uncore_data slot per possible die in the system */
	uncore_max_entries = topology_max_packages() *
					topology_max_die_per_package();
	uncore_instances = kcalloc(uncore_max_entries,
				   sizeof(*uncore_instances), GFP_KERNEL);
	if (!uncore_instances)
		return -ENOMEM;

	uncore_root_kobj = kobject_create_and_add("intel_uncore_frequency",
						  &cpu_subsys.dev_root->kobj);
	if (!uncore_root_kobj) {
		ret = -ENOMEM;
		goto err_free;
	}

	/* On success ret holds the dynamically allocated hotplug state */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"platform/x86/uncore-freq:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret < 0)
		goto err_rem_kobj;

	uncore_hp_state = ret;

	ret = register_pm_notifier(&uncore_pm_nb);
	if (ret)
		goto err_rem_state;

	return 0;

err_rem_state:
	cpuhp_remove_state(uncore_hp_state);
err_rem_kobj:
	kobject_put(uncore_root_kobj);
err_free:
	kfree(uncore_instances);

	return ret;
}
module_init(intel_uncore_init)
431 
/*
 * Module exit: unregister callbacks, then drop every valid per die
 * kobject and wait for its release before freeing the backing storage.
 */
static void __exit intel_uncore_exit(void)
{
	int i;

	unregister_pm_notifier(&uncore_pm_nb);
	cpuhp_remove_state(uncore_hp_state);
	for (i = 0; i < uncore_max_entries; ++i) {
		if (uncore_instances[i].valid) {
			kobject_put(&uncore_instances[i].kobj);
			/* Release callback signals kobj_unregister */
			wait_for_completion(&uncore_instances[i].kobj_unregister);
		}
	}
	kobject_put(uncore_root_kobj);
	kfree(uncore_instances);
}
module_exit(intel_uncore_exit)
448 
449 MODULE_LICENSE("GPL v2");
450 MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
451