/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Vinit Azad <vinit.azad@intel.com>
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Dave Gordon <david.s.gordon@intel.com>
 *    Alex Dai <yu.dai@intel.com>
 */
#include "i915_drv.h"
#include "intel_uc.h"

/**
 * DOC: GuC-specific firmware loader
 *
 * intel_guc:
 * Top level structure of the GuC. It handles firmware loading and manages the
 * client pool and doorbells. intel_guc owns an i915_guc_client to replace the
 * legacy ExecList submission.
 *
 * Firmware versioning:
 * The firmware build process generates a version header file with major and
 * minor version defined. These versions are built into the CSS header of the
 * firmware. The i915 kernel driver sets the minimal firmware version required
 * per platform. The firmware installation package installs (symbolic links)
 * the proper version of the firmware.
 *
 * GuC address space:
 * The GuC does not allow any gfx GGTT address that falls into the range
 * [0, WOPCM_TOP), which is reserved for the Boot ROM, SRAM and WOPCM.
 * Currently this top address is 512K. In order to exclude this 0-512K address
 * space from the GGTT, all gfx objects used by the GuC are pinned with a
 * PIN_OFFSET_BIAS of the WOPCM size.
 */

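/*
 * Firmware versions requested per platform; matched to the device in
 * intel_guc_select_fw() below.
 */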
#define SKL_FW_MAJOR 6
#define SKL_FW_MINOR 1

#define BXT_FW_MAJOR 8
#define BXT_FW_MINOR 7

#define KBL_FW_MAJOR 9
#define KBL_FW_MINOR 14

#define GUC_FW_PATH(platform, major, minor) \
	"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"

#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);

#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR)
MODULE_FIRMWARE(I915_BXT_GUC_UCODE);

#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);

static u32 get_gttype(struct drm_i915_private *dev_priv)
{
	/* XXX: GT type based on PCI device ID? field seems unused by fw */
	return 0;
}

static u32 get_core_family(struct drm_i915_private *dev_priv)
{
	u32 gen = INTEL_GEN(dev_priv);

	switch (gen) {
	case 9:
		return GUC_CORE_FAMILY_GEN9;

	default:
		MISSING_CASE(gen);
		return GUC_CORE_FAMILY_UNKNOWN;
	}
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_params_init(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(&params, 0, sizeof(params));

	params[GUC_CTL_DEVICE_INFO] |=
		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);

	/*
	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
	 * second. The ARAT value is calculated as:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
			GUC_CTL_VCS2_ENABLED;

	params[GUC_CTL_LOG_PARAMS] = guc->log.flags;

	if (i915.guc_log_level >= 0) {
		params[GUC_CTL_DEBUG] =
			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
	} else {
		params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
	}

	/* If GuC submission is enabled, set up additional parameters here */
	if (i915.enable_guc_submission) {
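		/*
		 * GGTT offsets of the additional data struct (ADS) and the
		 * stage descriptor pool, passed to the firmware in units of
		 * pages.
		 */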
		u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
		u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
		u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;

		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;

		pgs >>= PAGE_SHIFT;
		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);

		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;

		/* Unmask this bit to enable the GuC's internal scheduler */
		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
	}

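	/*
	 * The boot parameters are handed to the firmware through the
	 * SOFT_SCRATCH registers: scratch 0 is cleared and the params
	 * occupy scratch 1 onwards.
	 */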
	I915_WRITE(SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
}

/*
 * Read the GuC status register (GUC_STATUS) and store it in the
 * specified location; then return a boolean indicating whether
 * the value matches either of two values representing completion
 * of the GuC boot process.
 *
 * This is used for polling the GuC status in a wait_for()
 * loop below.
 */
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
				      u32 *status)
{
	u32 val = I915_READ(GUC_STATUS);
	u32 uk_val = val & GS_UKERNEL_MASK;
	*status = val;
	return (uk_val == GS_UKERNEL_READY ||
		((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
}

/*
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Architecturally, the DMA engine is bidirectional, and can potentially even
 * transfer between GTT locations. This functionality is left out of the API
 * for now as there is no need for it.
 *
 * Note that GuC needs the CSS header plus uKernel code to be copied by the
 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
 */
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
			      struct i915_vma *vma)
{
	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
	unsigned long offset;
	struct sg_table *sg = vma->pages;
	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
	int i, ret = 0;

	/* where RSA signature starts */
	offset = guc_fw->rsa_offset;

	/* Copy RSA signature from the fw image to HW for verification */
	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);

	/* The header plus uCode will be copied to WOPCM via DMA, excluding any
	 * other components */
	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);

	/* Set the source address for the new blob */
	offset = guc_ggtt_offset(vma) + guc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/*
	 * Set the DMA destination. Current uCode expects the code to be
	 * loaded at 8k; locations below this are used for the stack.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	/*
	 * Wait for the DMA to complete & the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms, so a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver will attempt to fall back to
	 * execlist mode if this happens.)
	 */
	ret = wait_for(guc_ucode_response(dev_priv, &status), 100);

	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
			 I915_READ(DMA_CTRL), status);

	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	DRM_DEBUG_DRIVER("returning %d\n", ret);

	return ret;
}

u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
{
	u32 wopcm_size = GUC_WOPCM_TOP;

	/* On BXT, the top of WOPCM is reserved for RC6 context */
	if (IS_GEN9_LP(dev_priv))
		wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;

	return wopcm_size;
}

/*
 * Load the GuC firmware blob into the MinuteIA.
 */
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
	struct i915_vma *vma;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(guc_fw->obj, false);
	if (ret) {
		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
		return ret;
	}

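	/*
	 * The GuC cannot address GGTT offsets below GUC_WOPCM_TOP (see
	 * "GuC address space" in the DOC comment above), so bias the pin
	 * to keep the firmware object clear of that range.
	 */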
	vma = i915_gem_object_ggtt_pin(guc_fw->obj, NULL, 0, 0,
				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (IS_ERR(vma)) {
		DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Enable MIA caching. GuC clock gating is disabled. */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

	/* WaDisableMinuteIaClockGating:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
					      ~GUC_ENABLE_MIA_CLOCK_GATING));
	}

	/* WaC6DisallowByGfxPause:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

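	/* Enable doorbells; the config register differs on gen9 LP parts */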
	if (IS_GEN9_LP(dev_priv))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev_priv)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us (in 10ns units) before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}

	guc_params_init(dev_priv);

	ret = guc_ucode_xfer_dma(dev_priv, vma);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/*
	 * We keep the object pages for reuse during resume. But we can unpin
	 * the vma now that the DMA has completed, so it doesn't continue to
	 * take up space.
	 */
	i915_vma_unpin(vma);

	return ret;
}

/**
 * intel_guc_init_hw() - finish preparing the GuC for activity
 * @guc: intel_guc structure
 *
 * Called during driver loading and also after a GPU reset.
 *
 * The main action required here is to load the GuC uCode into the device.
 * The firmware image should have already been fetched into memory by the
 * earlier call to intel_guc_init(), so here we need only check that
 * worked, and then transfer the image to the h/w.
 *
 * Return: non-zero code on error
 */
int intel_guc_init_hw(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	const char *fw_path = guc->fw.path;
	int ret;

	DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
			 fw_path,
			 intel_uc_fw_status_repr(guc->fw.fetch_status),
			 intel_uc_fw_status_repr(guc->fw.load_status));

	if (guc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
		return -EIO;

	guc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
			 intel_uc_fw_status_repr(guc->fw.fetch_status),
			 intel_uc_fw_status_repr(guc->fw.load_status));

	ret = guc_ucode_xfer(dev_priv);

	if (ret)
		return -EAGAIN;

	guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;

	DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
		 i915.enable_guc_submission ? "submission enabled" : "loaded",
		 guc->fw.path,
		 guc->fw.major_ver_found, guc->fw.minor_ver_found);

	return 0;
}

/**
 * intel_guc_select_fw() - selects GuC firmware for loading
 * @guc: intel_guc struct
 *
 * Return: zero if a firmware was selected, non-zero otherwise
 */
int intel_guc_select_fw(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	guc->fw.path = NULL;
	guc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
	guc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
	guc->fw.type = INTEL_UC_FW_TYPE_GUC;

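	/* A user-supplied firmware path overrides the per-platform defaults */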
	if (i915.guc_firmware_path) {
		guc->fw.path = i915.guc_firmware_path;
		guc->fw.major_ver_wanted = 0;
		guc->fw.minor_ver_wanted = 0;
	} else if (IS_SKYLAKE(dev_priv)) {
		guc->fw.path = I915_SKL_GUC_UCODE;
		guc->fw.major_ver_wanted = SKL_FW_MAJOR;
		guc->fw.minor_ver_wanted = SKL_FW_MINOR;
	} else if (IS_BROXTON(dev_priv)) {
		guc->fw.path = I915_BXT_GUC_UCODE;
		guc->fw.major_ver_wanted = BXT_FW_MAJOR;
		guc->fw.minor_ver_wanted = BXT_FW_MINOR;
	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
		guc->fw.path = I915_KBL_GUC_UCODE;
		guc->fw.major_ver_wanted = KBL_FW_MAJOR;
		guc->fw.minor_ver_wanted = KBL_FW_MINOR;
	} else {
		DRM_ERROR("No GuC firmware known for platform with GuC!\n");
		return -ENOENT;
	}

	return 0;
}