// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
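 *
 * For example (illustrative only; see the i915.enable_guc modparam
 * documentation for the authoritative bit definitions), loading the
 * firmware for HuC authentication without enabling GuC submission might
 * look like::
 *
 *      modprobe i915 enable_guc=2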
 */

void intel_guc_notify(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        /*
         * On Gen11+, the value written to the register is passed as a payload
         * to the FW. However, the FW currently treats all values the same way
         * (H2G interrupt), so we can just write the value that the HW expects
         * on older gens.
         */
        intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
        GEM_BUG_ON(!guc->send_regs.base);
        GEM_BUG_ON(!guc->send_regs.count);
        GEM_BUG_ON(i >= guc->send_regs.count);

        return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        enum forcewake_domains fw_domains = 0;
        unsigned int i;

        if (INTEL_GEN(gt->i915) >= 11) {
                guc->send_regs.base =
                        i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
                guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
        } else {
                guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
                guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
                BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
        }

        for (i = 0; i < guc->send_regs.count; i++) {
                fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
                                        guc_send_reg(guc, i),
                                        FW_REG_READ | FW_REG_WRITE);
        }
        guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        assert_rpm_wakelock_held(&gt->i915->runtime_pm);

        spin_lock_irq(&gt->irq_lock);
        gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
        spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        assert_rpm_wakelock_held(&gt->i915->runtime_pm);

        spin_lock_irq(&gt->irq_lock);
        if (!guc->interrupts.enabled) {
                WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
                             gt->pm_guc_events);
                guc->interrupts.enabled = true;
                gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
        }
        spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        assert_rpm_wakelock_held(&gt->i915->runtime_pm);

        spin_lock_irq(&gt->irq_lock);
        guc->interrupts.enabled = false;

        gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

        spin_unlock_irq(&gt->irq_lock);
        intel_synchronize_irq(gt->i915);

        gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        spin_lock_irq(&gt->irq_lock);
        gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
        spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        spin_lock_irq(&gt->irq_lock);
        if (!guc->interrupts.enabled) {
                u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

                WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
                intel_uncore_write(gt->uncore,
                                   GEN11_GUC_SG_INTR_ENABLE, events);
                intel_uncore_write(gt->uncore,
                                   GEN11_GUC_SG_INTR_MASK, ~events);
                guc->interrupts.enabled = true;
        }
        spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        spin_lock_irq(&gt->irq_lock);
        guc->interrupts.enabled = false;

        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

        spin_unlock_irq(&gt->irq_lock);
        intel_synchronize_irq(gt->i915);

        gen11_reset_guc_interrupts(guc);
}

void intel_guc_init_early(struct intel_guc *guc)
{
        struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

        intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
        intel_guc_ct_init_early(&guc->ct);
        intel_guc_log_init_early(&guc->log);
        intel_guc_submission_init_early(guc);

        mutex_init(&guc->send_mutex);
        spin_lock_init(&guc->irq_lock);
        if (INTEL_GEN(i915) >= 11) {
                guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
                guc->interrupts.reset = gen11_reset_guc_interrupts;
                guc->interrupts.enable = gen11_enable_guc_interrupts;
                guc->interrupts.disable = gen11_disable_guc_interrupts;
        } else {
                guc->notify_reg = GUC_SEND_INTERRUPT;
                guc->interrupts.reset = gen9_reset_guc_interrupts;
                guc->interrupts.enable = gen9_enable_guc_interrupts;
                guc->interrupts.disable = gen9_disable_guc_interrupts;
        }
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
        u32 level = intel_guc_log_get_level(&guc->log);
        u32 flags = 0;

        if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
                flags |= GUC_LOG_DISABLED;
        else
                flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
                         GUC_LOG_VERBOSITY_SHIFT;

        return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
        u32 flags = 0;

        if (!intel_guc_submission_is_used(guc))
                flags |= GUC_CTL_DISABLE_SCHEDULER;

        return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
        u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
        u32 flags;

        /*
         * The log buffer sizes are encoded in units of either 1MB or 4KB.
         * The unit (and its flag) is picked based on the crash buffer size;
         * the BUILD_BUG_ONs below verify that all buffers fit that unit.
         */
#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
#define UNIT SZ_1M
#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
#else
#define UNIT SZ_4K
#define FLAG 0
#endif

        BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
        BUILD_BUG_ON(!DPC_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
        BUILD_BUG_ON(!ISR_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

        BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
                     (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
        BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
                     (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
        BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
                     (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

        flags = GUC_LOG_VALID |
                GUC_LOG_NOTIFY_ON_HALF_FULL |
                FLAG |
                ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
                ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
                ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
                (offset << GUC_LOG_BUF_ADDR_SHIFT);

#undef UNIT
#undef FLAG

        return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
        u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
        u32 flags = ads << GUC_ADS_ADDR_SHIFT;

        return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
        u32 *params = guc->params;
        int i;

        BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

        params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
        params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
        params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
        params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block to the SOFT_SCRATCH registers, from where
 * the firmware reads it on startup. This must happen before the firmware
 * transfer is started, as the parameters cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
        struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
        int i;

        /*
         * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
         * they are power context saved so it's ok to release forcewake
         * when we are done here and take it again at xfer time.
         */
        intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

        intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

        intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}

int intel_guc_init(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        int ret;

        ret = intel_uc_fw_init(&guc->fw);
        if (ret)
                goto out;

        ret = intel_guc_log_create(&guc->log);
        if (ret)
                goto err_fw;

        ret = intel_guc_ads_create(guc);
        if (ret)
                goto err_log;
        GEM_BUG_ON(!guc->ads_vma);

        ret = intel_guc_ct_init(&guc->ct);
        if (ret)
                goto err_ads;

        if (intel_guc_submission_is_used(guc)) {
                /*
                 * This is stuff we need to have available at fw load time
                 * if we are planning to enable submission later
                 */
                ret = intel_guc_submission_init(guc);
                if (ret)
                        goto err_ct;
        }

        /* now that everything is perma-pinned, initialize the parameters */
        guc_init_params(guc);

        /* We need to notify the guc whenever we change the GGTT */
        i915_ggtt_enable_guc(gt->ggtt);

        intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);

        return 0;

err_ct:
        intel_guc_ct_fini(&guc->ct);
err_ads:
        intel_guc_ads_destroy(guc);
err_log:
        intel_guc_log_destroy(&guc->log);
err_fw:
        intel_uc_fw_fini(&guc->fw);
out:
        i915_probe_error(gt->i915, "failed with %d\n", ret);
        return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        if (!intel_uc_fw_is_loadable(&guc->fw))
                return;

        i915_ggtt_disable_guc(gt->ggtt);

        if (intel_guc_submission_is_used(guc))
                intel_guc_submission_fini(guc);

        intel_guc_ct_fini(&guc->ct);

        intel_guc_ads_destroy(guc);
        intel_guc_log_destroy(&guc->log);
        intel_uc_fw_fini(&guc->fw);
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
                        u32 *response_buf, u32 response_buf_size)
{
        struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
        u32 status;
        int i;
        int ret;

        GEM_BUG_ON(!len);
        GEM_BUG_ON(len > guc->send_regs.count);

        /* We expect only action code */
        GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

        /* If CT is available, we expect to use MMIO only during init/fini */
        GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
                   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

        mutex_lock(&guc->send_mutex);
        intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

        for (i = 0; i < len; i++)
                intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

        intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

        intel_guc_notify(guc);

        /*
         * No GuC command should ever take longer than 10ms.
         * Fast commands should still complete in 10us.
         */
        ret = __intel_wait_for_register_fw(uncore,
                                           guc_send_reg(guc, 0),
                                           INTEL_GUC_MSG_TYPE_MASK,
                                           INTEL_GUC_MSG_TYPE_RESPONSE <<
                                           INTEL_GUC_MSG_TYPE_SHIFT,
                                           10, 10, &status);
        /* If GuC explicitly returned an error, convert it to -EIO */
        if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
                ret = -EIO;

        if (ret) {
                DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
                          action[0], ret, status);
                goto out;
        }

        if (response_buf) {
                int count = min(response_buf_size, guc->send_regs.count - 1);

                for (i = 0; i < count; i++)
                        response_buf[i] = intel_uncore_read(uncore,
                                                guc_send_reg(guc, i + 1));
        }

        /* Use data from the GuC response as our return value */
        ret = INTEL_GUC_MSG_TO_DATA(status);

out:
        intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
        mutex_unlock(&guc->send_mutex);

        return ret;
}
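
/*
 * A minimal usage sketch for the MMIO path above (illustration only;
 * INTEL_GUC_ACTION_FOO is a hypothetical placeholder, the real callers
 * are the CTB register/deregister actions named in the GEM_BUG_ON above):
 *
 *      u32 request[] = { INTEL_GUC_ACTION_FOO };
 *      int err = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request),
 *                                    NULL, 0);
 *
 * A negative return value is a transport or GuC error; a non-negative
 * value is the data dword extracted from the GuC response.
 */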

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
                                       const u32 *payload, u32 len)
{
        u32 msg;

        if (unlikely(!len))
                return -EPROTO;

        /* Make sure to handle only enabled messages */
        msg = payload[0] & guc->msg_enabled_mask;

        if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
                   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
                intel_guc_log_handle_flush_event(&guc->log);

        return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
        u32 action[2];

        action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,cnl */
        if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                action[1] = 0;
        else
                /* bits 0 and 1 select the Render and Media domains respectively */
                action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t. ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface of intel_guc_send(). This
 * function is invoked by intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
        u32 action[] = {
                INTEL_GUC_ACTION_AUTHENTICATE_HUC,
                rsa_offset
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc: the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
        struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
        int ret;
        u32 status;
        u32 action[] = {
                INTEL_GUC_ACTION_ENTER_S_STATE,
                GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
        };

        /*
         * If GuC communication is enabled but submission is not supported,
         * we do not need to suspend the GuC.
         */
        if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
                return 0;

        /*
         * The ENTER_S_STATE action queues the save/restore operation in GuC FW
         * and then returns, so waiting on the H2G is not enough to guarantee
         * GuC is done. When all the processing is done, GuC writes
         * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
         * on that. Note that GuC does not ensure that the value in the register
         * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
         * in progress so we need to take care of that ourselves as well.
         */

        intel_uncore_write(uncore, SOFT_SCRATCH(14),
                           INTEL_GUC_SLEEP_STATE_INVALID_MASK);

        ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
        if (ret)
                return ret;

        ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
                                        INTEL_GUC_SLEEP_STATE_INVALID_MASK,
                                        0, 0, 10, &status);
        if (ret)
                return ret;

        if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
                DRM_ERROR("GuC failed to change sleep state. "
                          "action=0x%x, err=%u\n",
                          action[0], status);
                return -EIO;
        }

        return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc: intel_guc structure
 * @engine: engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
                           struct intel_engine_cs *engine)
{
        /* XXX: to be implemented with submission interface rework */

        return -ENODEV;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc: the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
        u32 action[] = {
                INTEL_GUC_ACTION_EXIT_S_STATE,
                GUC_POWER_D0,
        };

        /*
         * If GuC communication is enabled but submission is not supported,
         * we do not need to resume the GuC but we do need to enable the
         * GuC communication on resume (above).
         */
        if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
                return 0;

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself
 * (WOPCM) or to other parts of the HW. The driver must take care not to place
 * objects that the GuC is going to access in these reserved ranges. The layout
 * of the GuC address space is shown below:
 *
 * ::
 *
 *      +===========> +====================+ <== FFFF_FFFF
 *      ^             |      Reserved      |
 *      |             +====================+ <== GUC_GGTT_TOP
 *      |             |                    |
 *      |             |        DRAM        |
 *     GuC            |                    |
 *   Address    +===> +====================+ <== GuC ggtt_pin_bias
 *    Space     ^     |                    |
 *      |       |     |                    |
 *      |      GuC    |        GuC         |
 *      |     WOPCM   |       WOPCM        |
 *      |      Size   |                    |
 *      |       |     |                    |
 *      v       v     |                    |
 *      +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while the upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is
 * mapped to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so we
 * allocate both some backing storage and a range inside the Global GTT. We
 * must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because
 * that range is reserved inside GuC.
 *
 * Return: An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u64 flags;
        int ret;

        obj = i915_gem_object_create_shmem(gt->i915, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
        if (IS_ERR(vma))
                goto err;

        flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
        ret = i915_ggtt_pin(vma, NULL, 0, flags);
        if (ret) {
                vma = ERR_PTR(ret);
                goto err;
        }

        return i915_vma_make_unshrinkable(vma);

err:
        i915_gem_object_put(obj);
        return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 * @out_vma: return variable for the allocated vma pointer
 * @out_vaddr: return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return: 0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
                                   struct i915_vma **out_vma, void **out_vaddr)
{
        struct i915_vma *vma;
        void *vaddr;

        vma = intel_guc_allocate_vma(guc, size);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                i915_vma_unpin_and_release(&vma, 0);
                return PTR_ERR(vaddr);
        }

        *out_vma = vma;
        *out_vaddr = vaddr;

        return 0;
}
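
/*
 * A minimal usage sketch (illustration only): allocate and map a page for
 * GuC use, then release it again. I915_VMA_RELEASE_MAP tells
 * i915_vma_unpin_and_release() to also drop the mapping taken by
 * i915_gem_object_pin_map():
 *
 *      struct i915_vma *vma;
 *      void *vaddr;
 *      int err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &vma, &vaddr);
 *
 *      if (err)
 *              return err;
 *      ...
 *      i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
 */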

/**
 * intel_guc_load_status - dump information about GuC load status
 * @guc: the GuC
 * @p: the &drm_printer
 *
 * Pretty printer for GuC load status.
 */
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct intel_uncore *uncore = gt->uncore;
        intel_wakeref_t wakeref;

        if (!intel_guc_is_supported(guc)) {
                drm_printf(p, "GuC not supported\n");
                return;
        }

        if (!intel_guc_is_wanted(guc)) {
                drm_printf(p, "GuC disabled\n");
                return;
        }

        intel_uc_fw_dump(&guc->fw, p);

        with_intel_runtime_pm(uncore->rpm, wakeref) {
                u32 status = intel_uncore_read(uncore, GUC_STATUS);
                u32 i;

                drm_printf(p, "\nGuC status 0x%08x:\n", status);
                drm_printf(p, "\tBootrom status = 0x%x\n",
                           (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
                drm_printf(p, "\tuKernel status = 0x%x\n",
                           (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
                drm_printf(p, "\tMIA Core status = 0x%x\n",
                           (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
                drm_puts(p, "\nScratch registers:\n");
                for (i = 0; i < 16; i++) {
                        drm_printf(p, "\t%2d: \t0x%x\n",
                                   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
                }
        }
}