// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/wait_bit.h>

#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK	REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3

#define LNL_MERT_FREQ_CAP	800
#define BMG_MERT_FREQ_CAP	2133
#define BMG_MIN_FREQ		1200
#define BMG_MERT_FLUSH_FREQ_CAP	2600

#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
#define SLPC_ACT_FREQ_TIMEOUT_MS 100

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performant use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
 *
 * Render-C States:
 * ================
 *
 * Render-C states are also a GuC PC feature, now enabled in Xe for
 * all platforms.
 *
 */
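
/*
 * Typical lifecycle of this component, as an illustrative sketch only (the
 * real call sites live in the GT initialization, reset and suspend paths,
 * not in this file; frequencies are in MHz):
 *
 *	xe_guc_pc_init(pc);		- allocate the SLPC shared-data BO
 *	xe_guc_pc_start(pc);		- reset SLPC and initialize frequencies
 *	xe_guc_pc_set_min_freq(pc, min);- optional user/admin overrides
 *	xe_guc_pc_set_max_freq(pc, max);
 *	xe_guc_pc_stop(pc);		- on GT reset or suspend
 */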

static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
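
/*
 * For example, SLPC_EVENT(SLPC_EVENT_RESET, 2) packs the event id and an
 * argument count of two (here the shared-data GGTT address and a zero dword,
 * as used by pc_action_reset() below) into the second dword of the H2G SLPC
 * request message.
 */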

static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state)
{
	int timeout_us = 5000; /* roughly 5ms, but no need for precision */
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}
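
/*
 * Worked example for the backoff above: with the 5000 us budget, the sleeps
 * double from roughly 10 us (10, 20, 40, ..., 1280 us), and the final sleep
 * is clamped (here to 2450 us after 2550 us accumulated) so the total wait
 * never exceeds the timeout.
 */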

static int wait_for_flush_complete(struct xe_guc_pc *pc)
{
	const unsigned long timeout = msecs_to_jiffies(30);

	if (!wait_var_event_timeout(&pc->flush_freq_limit,
				    !atomic_read(&pc->flush_freq_limit),
				    timeout))
		return -ETIMEDOUT;

	return 0;
}

static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
{
	int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
	int slept, wait = 10;

	for (slept = 0; slept < timeout_us;) {
		if (xe_guc_pc_get_act_freq(pc) <= freq)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}

static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
			  mode, ERR_PTR(ret));
	return ret;
}

static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}
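
/*
 * Worked example for the two helpers above: the hardware ratio is in units
 * of 50/3 MHz (~16.67 MHz), so decode_freq(18) = DIV_ROUND_CLOSEST(18 * 50, 3)
 * = 300 MHz and encode_freq(300) = DIV_ROUND_CLOSEST(300 * 3, 50) = 18; the
 * conversions round-trip for frequencies that sit on the ratio grid.
 */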

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(gt, RP_CONTROL, state);
}

static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 MHz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(gt, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient level.
	 * Our goal is to have the admin choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
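
/*
 * Illustrative example: with RPe at, say, 1000 MHz, pc_set_min_freq(pc, 800)
 * also sets SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY so GuC does not silently
 * raise the floor back to RPe, whereas pc_set_min_freq(pc, 1100) clears it
 * again (assuming both values are within the RPn-RP0 bounds).
 */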

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for RPe.
	 * For platforms other than PVC we get the resolved RPe directly from
	 * PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, FREQ_INFO_REC);

	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_update_rpe_value(pc);
	else
		tgl_update_rpe_value(pc);

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use that
	 * as our minimum one.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}
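
/*
 * E.g. with a fused RPn of 300 MHz but a runtime RPe resolved by PCODE to
 * 250 MHz (hypothetical values), rpn_freq is lowered to 250 MHz and that
 * becomes the reported minimum from then on.
 */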

/**
 * xe_guc_pc_get_act_freq - Get Actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The actual running frequency, which might be 0 if the GT is in a
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(gt, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}

/**
 * xe_guc_pc_get_cur_freq - Get Current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	*freq = xe_mmio_read32(gt, RPNSWREQ);

	*freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
	*freq = decode_freq(*freq);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return 0;
}

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpe_freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	/*
	 * GuC SLPC plays with min freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto fw;

	*freq = pc_get_min_freq(pc);

fw:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	if (XE_WA(pc_to_gt(pc), 22019338487)) {
		if (wait_for_flush_complete(pc) != 0)
			return -EAGAIN;
	}

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: Xe_GuC_PC instance
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(gt, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(gt, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	if (XE_WA(gt, 22019338487)) {
		if (xe_gt_is_media_type(gt))
			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
		else
			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
	} else {
		return pc->rp0_freq;
	}
}
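
/*
 * Illustrative example: with the workaround active on a GT whose fused RP0
 * is 2400 MHz (hypothetical), the cap resolves to min(800, 2400) = 800 MHz
 * for a media GT and min(2133, 2400) = 2133 MHz otherwise.
 */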

/**
 * xe_guc_pc_raise_unslice - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}

/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * The same thing happens on server platforms, where min is listed as
	 * RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

out:
	return ret;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	return XE_WA(gt, 22019338487) &&
	       pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
}

/**
 * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
 * @pc: the xe_guc_pc object
 *
 * As per the WA, reduce max GT frequency during L2 cache flush
 */
void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 max_freq;
	int ret;

	if (!needs_flush_freq_limit(pc))
		return;

	mutex_lock(&pc->freq_lock);

	if (!pc->freq_ready) {
		mutex_unlock(&pc->freq_lock);
		return;
	}

	ret = pc_action_query_task_state(pc);
	if (ret) {
		mutex_unlock(&pc->freq_lock);
		return;
	}

	max_freq = pc_get_max_freq(pc);
	if (max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
		if (ret) {
			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
			mutex_unlock(&pc->freq_lock);
			return;
		}

		atomic_set(&pc->flush_freq_limit, 1);

		/*
		 * If the user has previously changed max freq, stash that
		 * value to restore later, otherwise use the current max. New
		 * user requests wait on flush.
		 */
		if (pc->user_requested_max != 0)
			pc->stashed_max_freq = pc->user_requested_max;
		else
			pc->stashed_max_freq = max_freq;
	}

	mutex_unlock(&pc->freq_lock);

	/*
	 * Wait for the actual freq to go below the flush cap: even if the
	 * previous max was below the cap, the current one might still be
	 * above it.
	 */
	ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
	if (ret)
		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
}

/**
 * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
 * @pc: the xe_guc_pc object
 *
 * Restore the previous GT max frequency value.
 */
void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (!needs_flush_freq_limit(pc))
		return;

	if (!atomic_read(&pc->flush_freq_limit))
		return;

	mutex_lock(&pc->freq_lock);

	ret = pc_set_max_freq(&gt->uc.guc.pc, pc->stashed_max_freq);
	if (ret)
		xe_gt_err_once(gt, "Failed to restore max freq %u:%d",
			       pc->stashed_max_freq, ret);

	atomic_set(&pc->flush_freq_limit, 0);
	mutex_unlock(&pc->freq_lock);
	wake_up_var(&pc->flush_freq_limit);
}
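
/*
 * Sketch of the intended pairing of the two helpers above (the actual calls
 * are made from the L2 flush path elsewhere in the driver):
 *
 *	xe_guc_pc_apply_flush_freq_limit(pc);	- cap max freq, stash old value
 *	... perform the L2 cache flush ...
 *	xe_guc_pc_remove_flush_freq_limit(pc);	- restore and wake waiters
 *
 * While the cap is applied, xe_guc_pc_set_max_freq() callers block in
 * wait_for_flush_complete() until wake_up_var() runs above.
 */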

static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (XE_WA(pc_to_gt(pc), 22019338487)) {
		/*
		 * Get updated min/max and stash them.
		 */
		ret = xe_guc_pc_get_min_freq(pc, &pc->stashed_min_freq);
		if (!ret)
			ret = xe_guc_pc_get_max_freq(pc, &pc->stashed_max_freq);
		if (ret)
			return ret;

		/*
		 * Ensure min and max are bound by MERT_FREQ_CAP until the
		 * driver loads.
		 */
		mutex_lock(&pc->freq_lock);
		ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
		if (!ret)
			ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
		mutex_unlock(&pc->freq_lock);
	}

	return ret;
}

/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Returns: 0 on success,
 *          error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	xe_gt_idle_disable_c6(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	return 0;
}

/**
 * xe_guc_pc_override_gucrc_mode - override GUCRC mode
 * @pc: Xe_GuC_PC instance
 * @mode: new value of the mode.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

/**
 * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}
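
/*
 * E.g. an RPn of 300 MHz and an RP0 of 1200 MHz (hypothetical values) are
 * handed to PCODE as 6 and 24 respectively, since the min-frequency table
 * is programmed in units of GT_FREQUENCY_MULTIPLIER (50 MHz).
 */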

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the user
	 * requested ones got restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
		xe_gt_err(gt, "GuC PC Start failed\n");
		ret = -EIO;
		goto out;
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	ret = pc_set_mert_freq_cap(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);

out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return 0;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	return 0;
}

/**
 * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
 * @arg: opaque pointer that should point to Xe_GuC_PC instance
 */
static void xe_guc_pc_fini_hw(void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);

	if (xe_device_wedged(xe))
		return;

	XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
	xe_guc_pc_gucrc_disable(pc);
	XE_WARN_ON(xe_guc_pc_stop(pc));

	/* Bind requested freq to mert_freq_cap before unload */
	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));

	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}