// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_communication_mmio_abi.h"
#include "abi/guc_klvs_abi.h"
#include "abi/guc_relay_actions_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sriov_vf_types.h"
#include "xe_guc.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"

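/* build a 64-bit value from two 32-bit dwords (hi = upper dword, lo = lower dword) */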
#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))

static int guc_action_vf_reset(struct xe_guc *guc)
{
	u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_VF_RESET),
	};
	int ret;

	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

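	/* a positive value means the GuC replied with unexpected response data */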
	return ret > 0 ? -EPROTO : ret;
}

#define GUC_RESET_VF_STATE_RETRY_MAX	10
static int vf_reset_guc_state(struct xe_gt *gt)
{
	unsigned int retry = GUC_RESET_VF_STATE_RETRY_MAX;
	struct xe_guc *guc = &gt->uc.guc;
	int err;

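	/* retry only on -ETIMEDOUT; success or any other error ends the loop */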
	do {
		err = guc_action_vf_reset(guc);
		if (!err || err != -ETIMEDOUT)
			break;
	} while (--retry);

	if (unlikely(err))
		xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_vf_reset - Reset GuC VF internal state.
 * @gt: the &xe_gt
 *
 * It requires functional `GuC MMIO based communication`_.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_reset(struct xe_gt *gt)
{
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	return vf_reset_guc_state(gt);
}

static int guc_action_match_version(struct xe_guc *guc,
				    u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
				    u32 *branch, u32 *major, u32 *minor, u32 *patch)
{
	u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_MATCH_VERSION),
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	int ret;

	BUILD_BUG_ON(VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN > GUC_MAX_MMIO_MSG_LEN);

	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
	*major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
	*patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);

	return 0;
}

static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
{
	struct xe_device *xe = gt_to_xe(gt);

	switch (xe->info.platform) {
	case XE_TIGERLAKE ... XE_PVC:
		/* 1.1 is the current baseline for the Xe driver */
		*branch = 0;
		*major = 1;
		*minor = 1;
		break;
	default:
		/* 1.2 adds support for the GMD_ID KLV */
		*branch = 0;
		*major = 1;
		*minor = 2;
		break;
	}
}

static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
{
	/* for now it's the same as the minimum */
	return vf_minimum_guc_version(gt, branch, major, minor);
}

static int vf_handshake_with_guc(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_guc *guc = &gt->uc.guc;
	u32 wanted_branch, wanted_major, wanted_minor;
	u32 branch, major, minor, patch;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	/* select wanted version - prefer previous (if any) */
	if (guc_version->major || guc_version->minor) {
		wanted_branch = guc_version->branch;
		wanted_major = guc_version->major;
		wanted_minor = guc_version->minor;
	} else {
		vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
		xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY);
	}

	err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor,
				       &branch, &major, &minor, &patch);
	if (unlikely(err))
		goto fail;

	/* we don't support interface version change */
	if ((guc_version->major || guc_version->minor) &&
	    (guc_version->branch != branch || guc_version->major != major ||
	     guc_version->minor != minor)) {
		xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
				branch, major, minor, patch);
		xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
				 guc_version->branch, guc_version->major,
				 guc_version->minor, guc_version->patch);
		err = -EREMCHG;
		goto fail;
	}

	/* illegal - the GuC must not report a major version newer than requested */
	if (major > wanted_major) {
		err = -EPROTO;
		goto unsupported;
	}

	/* there's no fallback on major version */
	if (major != wanted_major) {
		err = -ENOPKG;
		goto unsupported;
	}

	/* check against minimum version supported by us */
	vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
	xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY);
	if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) {
		err = -ENOKEY;
		goto unsupported;
	}

	xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
			branch, major, minor, patch);

	guc_version->branch = branch;
	guc_version->major = major;
	guc_version->minor = minor;
	guc_version->patch = patch;
	return 0;

unsupported:
	xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
			branch, major, minor, patch, ERR_PTR(err));
fail:
	xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
			wanted_major, wanted_minor, ERR_PTR(err));

	/* try again with *any* just to query which version is supported */
	if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY,
				      GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY,
				      &branch, &major, &minor, &patch))
		xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
				   branch, major, minor, patch);
	return err;
}

/**
 * xe_gt_sriov_vf_bootstrap - Query and setup GuC ABI interface version.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 * It requires functional `GuC MMIO based communication`_.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
{
	int err;

	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	err = vf_reset_guc_state(gt);
	if (unlikely(err))
		return err;

	err = vf_handshake_with_guc(gt);
	if (unlikely(err))
		return err;

	return 0;
}

static int guc_action_query_single_klv(struct xe_guc *guc, u32 key,
				       u32 *value, u32 value_len)
{
	u32 request[VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV),
		FIELD_PREP(VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_KEY, key),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	u32 length;
	int ret;

	BUILD_BUG_ON(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN > GUC_MAX_MMIO_MSG_LEN);
	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	length = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_LENGTH, response[0]);
	if (unlikely(length > value_len))
		return -EOVERFLOW;
	if (unlikely(length < value_len))
		return -ENODATA;

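	/* copy up to three value dwords from the response, highest dword first */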
	switch (value_len) {
	default:
		xe_gt_WARN_ON(guc_to_gt(guc), value_len > 3);
		fallthrough;
	case 3:
		value[2] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96, response[3]);
		fallthrough;
	case 2:
		value[1] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64, response[2]);
		fallthrough;
	case 1:
		value[0] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_1_VALUE32, response[1]);
		fallthrough;
	case 0:
		break;
	}

	return 0;
}

static int guc_action_query_single_klv32(struct xe_guc *guc, u32 key, u32 *value32)
{
	return guc_action_query_single_klv(guc, key, value32, hxg_sizeof(u32));
}

static int guc_action_query_single_klv64(struct xe_guc *guc, u32 key, u64 *value64)
{
	u32 value[2];
	int err;

	err = guc_action_query_single_klv(guc, key, value, hxg_sizeof(value));
	if (unlikely(err))
		return err;

	*value64 = make_u64_from_u32(value[1], value[0]);
	return 0;
}

static bool has_gmdid(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) >= 1270;
}

/**
 * xe_gt_sriov_vf_gmdid - Query GMDID over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: value of GMDID KLV on success or 0 on failure.
 */
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
{
	const char *type = xe_gt_is_media_type(gt) ? "media" : "graphics";
	struct xe_guc *guc = &gt->uc.guc;
	u32 value;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !GRAPHICS_VERx100(gt_to_xe(gt)) || has_gmdid(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major > 1 || gt->sriov.vf.guc_version.minor >= 2);

	err = guc_action_query_single_klv32(guc, GUC_KLV_GLOBAL_CFG_GMD_ID_KEY, &value);
	if (unlikely(err)) {
		xe_gt_sriov_err(gt, "Failed to obtain %s GMDID (%pe)\n",
				type, ERR_PTR(err));
		return 0;
	}

	xe_gt_sriov_dbg(gt, "%s GMDID = %#x\n", type, value);
	return value;
}

static int vf_get_ggtt_info(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	u64 start, size;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
	if (unlikely(err))
		return err;

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_SIZE_KEY, &size);
	if (unlikely(err))
		return err;

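	/* the GGTT assignment must not change once it has been queried */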
	if (config->ggtt_size && config->ggtt_size != size) {
		xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n",
				size / SZ_1K, config->ggtt_size / SZ_1K);
		return -EREMCHG;
	}

	xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
				start, start + size - 1, size / SZ_1K);

	config->ggtt_base = start;
	config->ggtt_size = size;

	return config->ggtt_size ? 0 : -ENODATA;
}

static int vf_get_lmem_info(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	char size_str[10];
	u64 size;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, &size);
	if (unlikely(err))
		return err;

	if (config->lmem_size && config->lmem_size != size) {
		xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n",
				size / SZ_1M, config->lmem_size / SZ_1M);
		return -EREMCHG;
	}

	string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str));
	xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str);

	config->lmem_size = size;

	return config->lmem_size ? 0 : -ENODATA;
}

static int vf_get_submission_cfg(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	u32 num_ctxs, num_dbs;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, &num_ctxs);
	if (unlikely(err))
		return err;

	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, &num_dbs);
	if (unlikely(err))
		return err;

	if (config->num_ctxs && config->num_ctxs != num_ctxs) {
		xe_gt_sriov_err(gt, "Unexpected CTXs reassignment: %u != %u\n",
				num_ctxs, config->num_ctxs);
		return -EREMCHG;
	}
	if (config->num_dbs && config->num_dbs != num_dbs) {
		xe_gt_sriov_err(gt, "Unexpected DBs reassignment: %u != %u\n",
				num_dbs, config->num_dbs);
		return -EREMCHG;
	}

	xe_gt_sriov_dbg_verbose(gt, "CTXs %u DBs %u\n", num_ctxs, num_dbs);

	config->num_ctxs = num_ctxs;
	config->num_dbs = num_dbs;

	return config->num_ctxs ? 0 : -ENODATA;
}

static void vf_cache_gmdid(struct xe_gt *gt)
{
	xe_gt_assert(gt, has_gmdid(gt_to_xe(gt)));
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	gt->sriov.vf.runtime.gmdid = xe_gt_sriov_vf_gmdid(gt);
}

/**
 * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	err = vf_get_ggtt_info(gt);
	if (unlikely(err))
		return err;

	if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
		err = vf_get_lmem_info(gt);
		if (unlikely(err))
			return err;
	}

	err = vf_get_submission_cfg(gt);
	if (unlikely(err))
		return err;

	if (has_gmdid(xe))
		vf_cache_gmdid(gt);

	return 0;
}

/**
 * xe_gt_sriov_vf_guc_ids - VF GuC context IDs configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: number of GuC context IDs assigned to VF.
 */
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.num_ctxs);

	return gt->sriov.vf.self_config.num_ctxs;
}

/**
 * xe_gt_sriov_vf_lmem - VF LMEM configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: size of the LMEM assigned to VF.
 */
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);

	return gt->sriov.vf.self_config.lmem_size;
}

static struct xe_ggtt_node *
vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end)
{
	struct xe_ggtt_node *node;
	int err;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return node;

	err = xe_ggtt_node_insert_balloon(node, start, end);
	if (err) {
		xe_ggtt_node_fini(node);
		return ERR_PTR(err);
	}

	return node;
}

static int vf_balloon_ggtt(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	struct xe_device *xe = gt_to_xe(gt);
	u64 start, end;

	xe_gt_assert(gt, IS_SRIOV_VF(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!config->ggtt_size)
		return -ENODATA;

	/*
	 * VF can only use part of the GGTT as allocated by the PF:
	 *
	 *      WOPCM                                  GUC_GGTT_TOP
	 *      |<------------ Total GGTT size ------------------>|
	 *
	 *           VF GGTT base -->|<- size ->|
	 *
	 *      +--------------------+----------+-----------------+
	 *      |////////////////////|   block  |\\\\\\\\\\\\\\\\\|
	 *      +--------------------+----------+-----------------+
	 *
	 *      |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
	 */

	start = xe_wopcm_size(xe);
	end = config->ggtt_base;
	if (end != start) {
		tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end);
		if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
			return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
	}

	start = config->ggtt_base + config->ggtt_size;
	end = GUC_GGTT_TOP;
	if (end != start) {
		tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end);
		if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
			xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
			return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
		}
	}

	return 0;
}

static void deballoon_ggtt(struct drm_device *drm, void *arg)
{
	struct xe_tile *tile = arg;

	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
	xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]);
	xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
}

/**
 * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	int err;

	if (xe_gt_is_media_type(gt))
		return 0;

	err = vf_balloon_ggtt(gt);
	if (err)
		return err;

	return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile);
}

static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
{
	u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VF2PF_HANDSHAKE),
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, *major) |
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, *minor),
	};
	u32 response[VF2PF_HANDSHAKE_RESPONSE_MSG_LEN];
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(ret != VF2PF_HANDSHAKE_RESPONSE_MSG_LEN))
		return -EPROTO;

	if (unlikely(FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*major = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, response[1]);

	return 0;
}

static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	gt->sriov.vf.pf_version.major = major;
	gt->sriov.vf.pf_version.minor = minor;
}

static void vf_disconnect_pf(struct xe_gt *gt)
{
	vf_connect_pf(gt, 0, 0);
}

static int vf_handshake_with_pf(struct xe_gt *gt)
{
	u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR;
	u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR;
	u32 major = major_wanted, minor = minor_wanted;
	int err;

	err = relay_action_handshake(gt, &major, &minor);
	if (unlikely(err))
		goto failed;

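	/* a 0.0 reply means the PF did not report any usable ABI version */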
	if (!major && !minor) {
		err = -ENODATA;
		goto failed;
	}

	xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor);
	vf_connect_pf(gt, major, minor);
	return 0;

failed:
	xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n",
			major, minor, ERR_PTR(err));
	vf_disconnect_pf(gt);
	return err;
}

/**
 * xe_gt_sriov_vf_connect - Establish connection with the PF driver.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_connect(struct xe_gt *gt)
{
	int err;

	err = vf_handshake_with_pf(gt);
	if (unlikely(err))
		goto failed;

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get version info (%pe)\n", ERR_PTR(err));
	return err;
}

static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	return major == gt->sriov.vf.pf_version.major &&
	       minor <= gt->sriov.vf.pf_version.minor;
}

static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs)
{
	struct vf_runtime_reg *regs = gt->sriov.vf.runtime.regs;
	unsigned int regs_size = round_up(num_regs, 4);
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

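	/* reuse the existing buffer when it is large enough, otherwise drop it and reallocate */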
	if (regs) {
		if (num_regs <= gt->sriov.vf.runtime.regs_size) {
			memset(regs, 0, num_regs * sizeof(*regs));
			gt->sriov.vf.runtime.num_regs = num_regs;
			return 0;
		}

		drmm_kfree(&xe->drm, regs);
		gt->sriov.vf.runtime.regs = NULL;
		gt->sriov.vf.runtime.num_regs = 0;
		gt->sriov.vf.runtime.regs_size = 0;
	}

	regs = drmm_kcalloc(&xe->drm, regs_size, sizeof(*regs), GFP_KERNEL);
	if (unlikely(!regs))
		return -ENOMEM;

	gt->sriov.vf.runtime.regs = regs;
	gt->sriov.vf.runtime.num_regs = num_regs;
	gt->sriov.vf.runtime.regs_size = regs_size;
	return 0;
}

static int vf_query_runtime_info(struct xe_gt *gt)
{
	u32 request[VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN];
	u32 response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 32]; /* up to 16 regs */
	u32 limit = (ARRAY_SIZE(response) - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	u32 count, remaining, num, i;
	u32 start = 0;
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, limit);

	/* this is part of the 1.0 PF/VF ABI */
	if (!vf_is_negotiated(gt, 1, 0))
		return -ENOPKG;

	request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
				GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME) |
		     FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, limit);

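	/* query registers in chunks of up to 'limit' entries until the PF reports none remaining */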
repeat:
	request[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, start);
	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		goto failed;

	if (unlikely(ret < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN)) {
		ret = -EPROTO;
		goto failed;
	}
	if (unlikely((ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) % 2)) {
		ret = -EPROTO;
		goto failed;
	}

	num = (ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, response[0]);
	remaining = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, response[1]);

	xe_gt_sriov_dbg_verbose(gt, "count=%u num=%u ret=%d start=%u remaining=%u\n",
				count, num, ret, start, remaining);

	if (unlikely(count != num)) {
		ret = -EPROTO;
		goto failed;
	}

	if (start == 0) {
		ret = vf_prepare_runtime_info(gt, num + remaining);
		if (unlikely(ret < 0))
			goto failed;
	} else if (unlikely(start + num > gt->sriov.vf.runtime.num_regs)) {
		ret = -EPROTO;
		goto failed;
	}

	for (i = 0; i < num; ++i) {
		struct vf_runtime_reg *reg = &gt->sriov.vf.runtime.regs[start + i];

		reg->offset = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i];
		reg->value = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i + 1];
	}

	if (remaining) {
		start += num;
		goto repeat;
	}

	return 0;

failed:
	vf_prepare_runtime_info(gt, 0);
	return ret;
}

static void vf_show_runtime_info(struct xe_gt *gt)
{
	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
	unsigned int size = gt->sriov.vf.runtime.num_regs;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	for (; size--; vf_regs++)
		xe_gt_sriov_dbg(gt, "runtime(%#x) = %#x\n",
				vf_regs->offset, vf_regs->value);
}

/**
 * xe_gt_sriov_vf_query_runtime - Query SR-IOV runtime data.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt)
{
	int err;

	err = vf_query_runtime_info(gt);
	if (unlikely(err))
		goto failed;

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		vf_show_runtime_info(gt);

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get runtime info (%pe)\n",
			ERR_PTR(err));
	return err;
}

static int vf_runtime_reg_cmp(const void *a, const void *b)
{
	const struct vf_runtime_reg *ra = a;
	const struct vf_runtime_reg *rb = b;

	return (int)ra->offset - (int)rb->offset;
}

static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
{
	struct xe_gt_sriov_vf_runtime *runtime = &gt->sriov.vf.runtime;
	struct vf_runtime_reg key = { .offset = addr };

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

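	/* bsearch() requires runtime->regs[] to be sorted by offset (assumed to be provided in that order by the PF) */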
	return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key),
		       vf_runtime_reg_cmp);
}

/**
 * xe_gt_sriov_vf_read32 - Get a register value from the runtime data.
 * @gt: the &xe_gt
 * @reg: the register to read
 *
 * This function is for VF use only.
 * This function shall be called after VF has connected to PF.
 * This function is dedicated for registers that VFs can't read directly.
 *
 * Return: register value obtained from the PF or 0 if not found.
 */
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
	struct vf_runtime_reg *rr;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.pf_version.major);
	xe_gt_assert(gt, !reg.vf);

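	/* GMD_ID is cached separately during config query, not kept in the runtime register table */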
	if (reg.addr == GMD_ID.addr) {
		xe_gt_sriov_dbg_verbose(gt, "gmdid(%#x) = %#x\n",
					addr, gt->sriov.vf.runtime.gmdid);
		return gt->sriov.vf.runtime.gmdid;
	}

	rr = vf_lookup_reg(gt, addr);
	if (!rr) {
		xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
			   "VF is trying to read an inaccessible register %#x+%#x\n",
			   reg.addr, addr - reg.addr);
		return 0;
	}

	xe_gt_sriov_dbg_verbose(gt, "runtime[%#x] = %#x\n", addr, rr->value);
	return rr->value;
}

/**
 * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register.
 * @gt: the &xe_gt
 * @reg: the register to write
 * @val: value to write
 *
 * This function is for VF use only.
 * Currently it will trigger a WARN if running on debug build.
 */
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !reg.vf);

	/*
	 * In the future, we may want to handle selected writes to inaccessible
	 * registers in some custom way, but for now let's just log a warning
	 * about such attempt, as likely we might be doing something wrong.
	 */
	xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
		   "VF is trying to write %#x to an inaccessible register %#x+%#x\n",
		   val, reg.addr, addr - reg.addr);
}

/**
 * xe_gt_sriov_vf_print_config - Print VF self config.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_device *xe = gt_to_xe(gt);
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
		   config->ggtt_base,
		   config->ggtt_base + config->ggtt_size - 1);

	string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);

	if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
		string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
	}

	drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
	drm_printf(p, "GuC doorbells:\t%u\n", config->num_dbs);
}

/**
 * xe_gt_sriov_vf_print_runtime - Print VF's runtime regs received from PF.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p)
{
	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
	unsigned int size = gt->sriov.vf.runtime.num_regs;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	for (; size--; vf_regs++)
		drm_printf(p, "%#x = %#x\n", vf_regs->offset, vf_regs->value);
}

/**
 * xe_gt_sriov_vf_print_version - Print VF ABI versions.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_gt_sriov_vf_relay_version *pf_version = &gt->sriov.vf.pf_version;
	u32 branch, major, minor;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GuC ABI:\n");

	vf_minimum_guc_version(gt, &branch, &major, &minor);
	drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor);

	vf_wanted_guc_version(gt, &branch, &major, &minor);
	drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor);

	drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n",
		   guc_version->branch, guc_version->major,
		   guc_version->minor, guc_version->patch);

	drm_printf(p, "PF ABI:\n");

	drm_printf(p, "\tbase:\t%u.%u\n",
		   GUC_RELAY_VERSION_BASE_MAJOR, GUC_RELAY_VERSION_BASE_MINOR);
	drm_printf(p, "\twanted:\t%u.%u\n",
		   GUC_RELAY_VERSION_LATEST_MAJOR, GUC_RELAY_VERSION_LATEST_MINOR);
	drm_printf(p, "\thandshake:\t%u.%u\n",
		   pf_version->major, pf_version->minor);
}