// SPDX-License-Identifier: GPL-2.0-only
/*
 * uncore-frequency-tpmi: Uncore frequency scaling using TPMI
 *
 * Copyright (c) 2023, Intel Corporation.
 * All Rights Reserved.
 *
 * The hardware interface is essentially a replacement for MSR 0x620
 * and MSR 0x621: specific MMIO offsets and bit fields get/set the
 * minimum and maximum uncore ratio, similar to the MSRs.
 * The uncore MSRs have package scope, but TPMI allows newer
 * generations of CPUs to expose multiple uncore controls at
 * uncore-cluster level. Each package can have multiple power domains,
 * which in turn can have multiple clusters.
 * The number of power domains equals the number of resources in this
 * auxiliary device. Offsets and bit fields are defined to discover the
 * number of clusters and the offset of each cluster's controls.
 */

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/intel_tpmi.h>

#include "uncore-frequency-common.h"

#define UNCORE_MAJOR_VERSION		0
#define UNCORE_MINOR_VERSION		2
#define UNCORE_ELC_SUPPORTED_VERSION	2
#define UNCORE_HEADER_INDEX		0
#define UNCORE_FABRIC_CLUSTER_OFFSET	8

/* status + control + adv_ctl1 + adv_ctl2 */
#define UNCORE_FABRIC_CLUSTER_SIZE	(4 * 8)

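/* Byte offsets of the per-cluster status and control registers */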
#define UNCORE_STATUS_INDEX		0
#define UNCORE_CONTROL_INDEX		8

#define UNCORE_FREQ_KHZ_MULTIPLIER	100000

struct tpmi_uncore_struct;

/* Information for each cluster */
struct tpmi_uncore_cluster_info {
        bool root_domain;
        bool elc_supported;
        u8 __iomem *cluster_base;
        struct uncore_data uncore_data;
        struct tpmi_uncore_struct *uncore_root;
};

/* Information for each power domain */
struct tpmi_uncore_power_domain_info {
        u8 __iomem *uncore_base;
        int ufs_header_ver;
        int cluster_count;
        struct tpmi_uncore_cluster_info *cluster_infos;
};

/* Information for all power domains in a package */
struct tpmi_uncore_struct {
        int power_domain_count;
        int max_ratio;
        int min_ratio;
        struct tpmi_uncore_power_domain_info *pd_info;
        struct tpmi_uncore_cluster_info root_cluster;
        bool write_blocked;
};

/* Bit definitions for STATUS register */
#define UNCORE_CURRENT_RATIO_MASK			GENMASK_ULL(6, 0)

/* Bit definitions for CONTROL register */
#define UNCORE_MAX_RATIO_MASK				GENMASK_ULL(14, 8)
#define UNCORE_MIN_RATIO_MASK				GENMASK_ULL(21, 15)
#define UNCORE_EFF_LAT_CTRL_RATIO_MASK			GENMASK_ULL(28, 22)
#define UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK		GENMASK_ULL(38, 32)
#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE	BIT(39)
#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK		GENMASK_ULL(46, 40)

/* Helper function to read MMIO offset for max/min control frequency */
static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info,
                              unsigned int *value, enum uncore_index index)
{
        u64 control;

        control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
        if (index == UNCORE_INDEX_MAX_FREQ)
                *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
        else
                *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
}

/* Helper function to read efficiency latency control values over MMIO */
static int read_eff_lat_ctrl(struct uncore_data *data, unsigned int *val, enum uncore_index index)
{
        struct tpmi_uncore_cluster_info *cluster_info;
        u64 ctrl;

        cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
        if (cluster_info->root_domain)
                return -ENODATA;

        if (!cluster_info->elc_supported)
                return -EOPNOTSUPP;

        ctrl = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

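        /*
         * ELC thresholds are stored in hardware as a fraction of the
         * field's maximum value; convert to a percentage (0-100) for
         * reporting, rounding up.
         */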
        switch (index) {
        case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
                *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, ctrl);
                *val *= 100;
                *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK));
                break;

        case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
                *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, ctrl);
                *val *= 100;
                *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK));
                break;

        case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
                *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, ctrl);
                break;
        case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
                *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_RATIO_MASK, ctrl) * UNCORE_FREQ_KHZ_MULTIPLIER;
                break;

        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

#define UNCORE_MAX_RATIO	FIELD_MAX(UNCORE_MAX_RATIO_MASK)

/* Helper for sysfs read for max/min frequencies. Called under mutex locks */
static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
                                    enum uncore_index index)
{
        struct tpmi_uncore_cluster_info *cluster_info;

        cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);

        if (cluster_info->root_domain) {
                struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
                unsigned int min, max, v;
                int i;

                min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
                max = 0;

                /*
                 * Get the max/min by looking at each cluster. Get the lowest
                 * min and highest max.
                 */
                for (i = 0; i < uncore_root->power_domain_count; ++i) {
                        int j;

                        for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) {
                                read_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
                                                  &v, index);
                                if (v < min)
                                        min = v;
                                if (v > max)
                                        max = v;
                        }
                }

                if (index == UNCORE_INDEX_MIN_FREQ)
                        *value = min;
                else
                        *value = max;

                return 0;
        }

        read_control_freq(cluster_info, value, index);

        return 0;
}

/* Helper function for writing efficiency latency control values over MMIO */
static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index)
{
        struct tpmi_uncore_cluster_info *cluster_info;
        struct tpmi_uncore_struct *uncore_root;
        u64 control;

        cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
        uncore_root = cluster_info->uncore_root;

        if (uncore_root->write_blocked)
                return -EPERM;

        if (cluster_info->root_domain)
                return -ENODATA;

        if (!cluster_info->elc_supported)
                return -EOPNOTSUPP;

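        /*
         * Validate the input before touching the hardware: thresholds are
         * percentages (0-100), the enable control is a single bit, and the
         * ELC frequency is converted from kHz to a ratio and checked
         * against its field width.
         */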
        switch (index) {
        case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
                if (val > 100)
                        return -EINVAL;
                break;

        case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
                if (val > 100)
                        return -EINVAL;
                break;

        case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
                if (val > 1)
                        return -EINVAL;
                break;

        case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
                val /= UNCORE_FREQ_KHZ_MULTIPLIER;
                if (val > FIELD_MAX(UNCORE_EFF_LAT_CTRL_RATIO_MASK))
                        return -EINVAL;
                break;

        default:
                return -EOPNOTSUPP;
        }

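        /*
         * Read-modify-write of the control register: convert percentage
         * thresholds back to the hardware scale and update only the field
         * selected by index.
         */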
        control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

        switch (index) {
        case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
                val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK);
                val /= 100;
                control &= ~UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK;
                control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, val);
                break;

        case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
                val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK);
                val /= 100;
                control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK;
                control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, val);
                break;

        case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
                control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE;
                control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, val);
                break;

        case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
                control &= ~UNCORE_EFF_LAT_CTRL_RATIO_MASK;
                control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_RATIO_MASK, val);
                break;

        default:
                break;
        }

        writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

        return 0;
}

/* Helper function to write MMIO offset for max/min control frequency */
static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input,
                               unsigned int index)
{
        u64 control;

        control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

        if (index == UNCORE_INDEX_MAX_FREQ) {
                control &= ~UNCORE_MAX_RATIO_MASK;
                control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
        } else {
                control &= ~UNCORE_MIN_RATIO_MASK;
                control |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
        }

        writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX));
}

/* Helper for sysfs write for max/min frequencies. Called under mutex locks */
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
                                     enum uncore_index index)
{
        struct tpmi_uncore_cluster_info *cluster_info;
        struct tpmi_uncore_struct *uncore_root;

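        /* Convert the kHz input from sysfs to a ratio and bounds check it */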
        input /= UNCORE_FREQ_KHZ_MULTIPLIER;
        if (!input || input > UNCORE_MAX_RATIO)
                return -EINVAL;

        cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
        uncore_root = cluster_info->uncore_root;

        if (uncore_root->write_blocked)
                return -EPERM;

        /* Update each cluster in a package */
        if (cluster_info->root_domain) {
                struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
                int i;

                for (i = 0; i < uncore_root->power_domain_count; ++i) {
                        int j;

                        for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j)
                                write_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
                                                   input, index);
                }

                if (index == UNCORE_INDEX_MAX_FREQ)
                        uncore_root->max_ratio = input;
                else
                        uncore_root->min_ratio = input;

                return 0;
        }

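        /*
         * A cluster's max can't exceed, and its min can't go below, the
         * package-wide limits previously set via the root domain.
         */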
        if (index == UNCORE_INDEX_MAX_FREQ && uncore_root->max_ratio &&
            uncore_root->max_ratio < input)
                return -EINVAL;

        if (index == UNCORE_INDEX_MIN_FREQ && uncore_root->min_ratio &&
            uncore_root->min_ratio > input)
                return -EINVAL;

        write_control_freq(cluster_info, input, index);

        return 0;
}

/* Helper for sysfs read for the current uncore frequency. Called under mutex locks */
static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
{
        struct tpmi_uncore_cluster_info *cluster_info;
        u64 status;

        cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
        if (cluster_info->root_domain)
                return -ENODATA;

        status = readq(cluster_info->cluster_base + UNCORE_STATUS_INDEX);
        *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, status) * UNCORE_FREQ_KHZ_MULTIPLIER;

        return 0;
}

/* Callback for sysfs read for TPMI uncore values. Called under mutex locks. */
static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
{
        switch (index) {
        case UNCORE_INDEX_MIN_FREQ:
        case UNCORE_INDEX_MAX_FREQ:
                return uncore_read_control_freq(data, value, index);

        case UNCORE_INDEX_CURRENT_FREQ:
                return uncore_read_freq(data, value);

        case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
        case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
        case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
        case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
                return read_eff_lat_ctrl(data, value, index);

        default:
                break;
        }

        return -EOPNOTSUPP;
}

/* Callback for sysfs write for TPMI uncore data. Called under mutex locks. */
static int uncore_write(struct uncore_data *data, unsigned int value, enum uncore_index index)
{
        switch (index) {
        case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
        case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
        case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
        case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
                return write_eff_lat_ctrl(data, value, index);

        case UNCORE_INDEX_MIN_FREQ:
        case UNCORE_INDEX_MAX_FREQ:
                return uncore_write_control_freq(data, value, index);

        default:
                break;
        }

        return -EOPNOTSUPP;
}

static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore)
{
        int i;

        for (i = 0; i < tpmi_uncore->power_domain_count; ++i) {
                struct tpmi_uncore_power_domain_info *pd_info;
                int j;

                pd_info = &tpmi_uncore->pd_info[i];
                if (!pd_info->uncore_base)
                        continue;

                for (j = 0; j < pd_info->cluster_count; ++j) {
                        struct tpmi_uncore_cluster_info *cluster_info;

                        cluster_info = &pd_info->cluster_infos[j];
                        uncore_freq_remove_die_entry(&cluster_info->uncore_data);
                }
        }
}

#define UNCORE_VERSION_MASK			GENMASK_ULL(7, 0)
#define UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK	GENMASK_ULL(15, 8)
#define UNCORE_CLUSTER_OFF_MASK			GENMASK_ULL(7, 0)
#define UNCORE_MAX_CLUSTER_PER_DOMAIN		8

static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
        bool read_blocked = 0, write_blocked = 0;
        struct intel_tpmi_plat_info *plat_info;
        struct tpmi_uncore_struct *tpmi_uncore;
        bool uncore_sysfs_added = false;
        int ret, i, pkg = 0;
        int num_resources;

        ret = tpmi_get_feature_status(auxdev, TPMI_ID_UNCORE, &read_blocked, &write_blocked);
        if (ret)
                dev_info(&auxdev->dev, "Can't read feature status: ignoring blocked status\n");

        if (read_blocked) {
                dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n");
                return -ENODEV;
        }

        /* Get number of power domains, which is equal to number of resources */
        num_resources = tpmi_get_resource_count(auxdev);
        if (!num_resources)
                return -EINVAL;

        /* Register callbacks to uncore core */
        ret = uncore_freq_common_init(uncore_read, uncore_write);
        if (ret)
                return ret;

        /* Allocate uncore instance per package */
        tpmi_uncore = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_uncore), GFP_KERNEL);
        if (!tpmi_uncore) {
                ret = -ENOMEM;
                goto err_rem_common;
        }

        /* Allocate memory for all power domains in a package */
        tpmi_uncore->pd_info = devm_kcalloc(&auxdev->dev, num_resources,
                                            sizeof(*tpmi_uncore->pd_info),
                                            GFP_KERNEL);
        if (!tpmi_uncore->pd_info) {
                ret = -ENOMEM;
                goto err_rem_common;
        }

        tpmi_uncore->power_domain_count = num_resources;
        tpmi_uncore->write_blocked = write_blocked;

        /* Get the package ID from the TPMI core */
        plat_info = tpmi_get_platform_data(auxdev);
        if (unlikely(!plat_info)) {
                dev_info(&auxdev->dev, "Platform information is NULL\n");
                ret = -ENODEV;
                goto err_rem_common;
        }

        pkg = plat_info->package_id;

        for (i = 0; i < num_resources; ++i) {
                struct tpmi_uncore_power_domain_info *pd_info;
                struct resource *res;
                u64 cluster_offset;
                u8 cluster_mask;
                int mask, j;
                u64 header;

                res = tpmi_get_resource_at_index(auxdev, i);
                if (!res)
                        continue;

                pd_info = &tpmi_uncore->pd_info[i];

                pd_info->uncore_base = devm_ioremap_resource(&auxdev->dev, res);
                if (IS_ERR(pd_info->uncore_base)) {
                        ret = PTR_ERR(pd_info->uncore_base);
                        /*
                         * Set to NULL so that remove_cluster_entries() can
                         * still clean up any entries already created.
                         */
                        pd_info->uncore_base = NULL;
                        goto remove_clusters;
                }

                /* Check for version and skip this resource if there is a mismatch */
                header = readq(pd_info->uncore_base);
                pd_info->ufs_header_ver = header & UNCORE_VERSION_MASK;

                if (pd_info->ufs_header_ver == TPMI_VERSION_INVALID)
                        continue;

                if (TPMI_MAJOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MAJOR_VERSION) {
                        dev_err(&auxdev->dev, "Uncore: Unsupported major version:%lx\n",
                                TPMI_MAJOR_VERSION(pd_info->ufs_header_ver));
                        ret = -ENODEV;
                        goto remove_clusters;
                }

                if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
                        dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
                                 TPMI_MINOR_VERSION(pd_info->ufs_header_ver));

                /* Get Cluster ID Mask */
                cluster_mask = FIELD_GET(UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK, header);
                if (!cluster_mask) {
                        dev_info(&auxdev->dev, "Uncore: Invalid cluster mask:%x\n", cluster_mask);
                        continue;
                }

                /* Find out number of clusters in this resource */
                pd_info->cluster_count = hweight8(cluster_mask);

                pd_info->cluster_infos = devm_kcalloc(&auxdev->dev, pd_info->cluster_count,
                                                      sizeof(struct tpmi_uncore_cluster_info),
                                                      GFP_KERNEL);
                if (!pd_info->cluster_infos) {
                        ret = -ENOMEM;
                        goto remove_clusters;
                }
                /*
                 * Each byte in the register points to the status and control
                 * registers belonging to cluster ids 0-7.
                 */
                cluster_offset = readq(pd_info->uncore_base +
                                       UNCORE_FABRIC_CLUSTER_OFFSET);

                for (j = 0; j < pd_info->cluster_count; ++j) {
                        struct tpmi_uncore_cluster_info *cluster_info;

                        /* Get the offset for this cluster */
                        mask = (cluster_offset & UNCORE_CLUSTER_OFF_MASK);
                        /* Offset in QWORD, so change to bytes */
                        mask <<= 3;

                        cluster_info = &pd_info->cluster_infos[j];

                        cluster_info->cluster_base = pd_info->uncore_base + mask;

                        cluster_info->uncore_data.package_id = pkg;
                        /* There are no dies like Cascade Lake */
                        cluster_info->uncore_data.die_id = 0;
                        cluster_info->uncore_data.domain_id = i;
                        cluster_info->uncore_data.cluster_id = j;

                        cluster_info->uncore_root = tpmi_uncore;

                        if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION)
                                cluster_info->elc_supported = true;

                        ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0);
                        if (ret) {
                                cluster_info->cluster_base = NULL;
                                goto remove_clusters;
                        }
                        /* Point to next cluster offset */
                        cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN;
                        uncore_sysfs_added = true;
                }
        }

        if (!uncore_sysfs_added) {
                ret = -ENODEV;
                goto remove_clusters;
        }

        auxiliary_set_drvdata(auxdev, tpmi_uncore);

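        /* The package-scope root domain control is only created for single-die packages */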
        if (topology_max_dies_per_package() > 1)
                return 0;

        tpmi_uncore->root_cluster.root_domain = true;
        tpmi_uncore->root_cluster.uncore_root = tpmi_uncore;

        tpmi_uncore->root_cluster.uncore_data.package_id = pkg;
        tpmi_uncore->root_cluster.uncore_data.domain_id = UNCORE_DOMAIN_ID_INVALID;
        ret = uncore_freq_add_entry(&tpmi_uncore->root_cluster.uncore_data, 0);
        if (ret)
                goto remove_clusters;

        return 0;

remove_clusters:
        remove_cluster_entries(tpmi_uncore);
err_rem_common:
        uncore_freq_common_exit();

        return ret;
}

static void uncore_remove(struct auxiliary_device *auxdev)
{
        struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev);

        if (tpmi_uncore->root_cluster.root_domain)
                uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);

        remove_cluster_entries(tpmi_uncore);

        uncore_freq_common_exit();
}

static const struct auxiliary_device_id intel_uncore_id_table[] = {
        { .name = "intel_vsec.tpmi-uncore" },
        {}
};
MODULE_DEVICE_TABLE(auxiliary, intel_uncore_id_table);

static struct auxiliary_driver intel_uncore_aux_driver = {
        .id_table	= intel_uncore_id_table,
        .remove		= uncore_remove,
        .probe		= uncore_probe,
};

module_auxiliary_driver(intel_uncore_aux_driver);

MODULE_IMPORT_NS(INTEL_TPMI);
MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY);
MODULE_DESCRIPTION("Intel TPMI UFS Driver");
MODULE_LICENSE("GPL");