// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

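/*
 * A zero buffer address and size means that no KLVs are transferred, which
 * effectively asks GuC to reset (discard) the VF's current configuration;
 * any positive KLV-count reply to such a request is therefore treated as a
 * protocol error.
 */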
/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

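/*
 * The KLV stream is staged in a temporary buffer object mapped in the GGTT,
 * so that GuC can read it by its GGTT address; the buffer is unpinned and
 * released as soon as the PF2GUC_UPDATE_VF_CFG action completes.
 */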
/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
{
	const u32 bytes = num_dwords * sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(bytes, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);

	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);

	xe_bo_unpin_map_no_vm(bo);

	return ret;
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	int ret;

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);

		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

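/*
 * Each KLV starts with a single header dword that packs the key and the
 * length (in dwords) of the value that follows: one dword for u32 values,
 * two dwords (lo/hi) for u64 values.  An illustrative two-KLV stream:
 *
 *   { KEY(GGTT_START) | LEN(2), lo32(start), hi32(start),
 *     KEY(GGTT_SIZE)  | LEN(2), lo32(size),  hi32(size) }
 */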
static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
{
	u32 n = 0;

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
		cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
		cfg[n++] = upper_32_bits(config->ggtt_region->base.start);

		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
		cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
		cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
	}

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config);

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
	cfg[n++] = config->begin_ctx;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
	cfg[n++] = config->begin_db;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(config->lmem_obj->size);
		cfg[n++] = upper_32_bits(config->lmem_obj->size);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}

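/*
 * Pushes the complete config (GGTT, context IDs, doorbell IDs, LMEM,
 * scheduling parameters and thresholds) as a single KLV stream.  For a
 * media GT the GGTT KLVs are taken from the primary GT's config, since
 * the GGTT is a per-tile resource.
 */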
static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	num_dwords = encode_config(cfg, config);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);

	kfree(cfg);
	return err;
}

static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
{
	int err = 0;

	xe_gt_assert(gt, vfid);
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (reset)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);

	return err;
}

static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
{
	return pf_push_vf_cfg(gt, vfid, true);
}

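/*
 * Platforms that require 64K VRAM pages (XE_VRAM_FLAGS_NEED64K) also need
 * the VF GGTT ranges to be 64K aligned; everywhere else the regular 4K
 * GGTT page alignment is sufficient.
 */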
static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
		 * is redundant, as PTE will be implicitly re-assigned to PF by
		 * the xe_ggtt_clear() called by below xe_ggtt_remove_node().
		 */
		xe_ggtt_node_remove(node, false);
	} else {
		xe_ggtt_node_fini(node);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

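/*
 * Reprovisioning flow: any existing GGTT range is first unprovisioned in
 * GuC (zero start/size), released locally and the remaining config pushed
 * again, before a fresh node is inserted, its PTEs assigned to the VF and
 * the new addresses distributed to both GTs on the tile.
 */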
static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);

		err = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(err))
			return err;
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	pf_release_ggtt(tile, node);
	return err;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If &vfid represents the PF, then this function changes the PF's spare GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}

static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest continuous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at single largest GGTT region
	 * as that will be always the best fit for 1 VF case, and most likely
	 * will also nicely cover other cases where VFs are provisioned on the
	 * fresh and idle PF driver, without any stale GGTT allocations spread
	 * in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

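/*
 * Context ID provisioning mirrors the GGTT flow: the old range is first
 * dropped in GuC, released locally and the remaining config refreshed,
 * before a fresh range is reserved from the GuC ID manager and pushed.
 */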
static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents the PF, then the number of the PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

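/*
 * The GuC ID space may be fragmented, so instead of trusting plain
 * arithmetic the fair share is probed: starting from (total - spare) /
 * num_vfs, try to reserve num_vfs such chunks at once, stepping down
 * until a reservation succeeds, and release it again immediately.
 */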
static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents the PF, then the number of the PF's spare GuC doorbell IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

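/* Doorbell fairness uses the same probing strategy as pf_estimate_fair_ctxs(). */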
static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? bo->size : 0;
}

static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	/* TODO */
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

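/*
 * The LMTT on each tile maps the VF's entire LMEM: the quotas of all
 * primary GTs are summed, pages are prepared per tile and then populated
 * GT by GT at increasing offsets.  On any failure all pages are dropped
 * again, so the LMTT never holds a partial mapping.
 */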
static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += bo->size;
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
	}
}

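/*
 * VF LMEM is backed by a pinned kernel BO allocated with XE_BO_FLAG_NEEDS_2M,
 * matching the 2M LMEM alignment assumed by pf_get_lmem_alignment() and
 * asserted below.
 */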
static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_NEEDS_2M |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	config->lmem_obj = bo;

	err = pf_update_vf_lmtt(xe, vfid);
	if (unlikely(err))
		goto release;

	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, bo->size, bo->size / SZ_1M);
	return 0;

reset_lmtt:
	pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_lmem(gt, vfid, size);
	else
		err = pf_set_spare_lmem(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
				      vfid ? "LMEM" : "spare LMEM", err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_lmem(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_lmem,
					   "LMEM", n, err);
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

	return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
}

static u64 pf_query_max_lmem(struct xe_gt *gt)
{
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 spare = pf_get_spare_lmem(gt);
	u64 free = pf_query_free_lmem(gt);
	u64 avail;

	/* XXX: need to account for 2MB blocks only */
	avail = free > spare ? free - spare : 0;
	avail = round_down(avail, alignment);

	return avail;
}

#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
#else
#define MAX_FAIR_LMEM	SZ_2G	/* XXX: known issue with allocating BO over 2GiB */
#endif

static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_query_max_lmem(gt);
	u64 alignment = pf_get_lmem_alignment(gt);
	u64 fair;

	fair = div_u64(available, num_vfs);
	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
	fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
	fair = min_t(u64, MAX_FAIR_LMEM, fair);
#endif
	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
				available / SZ_1M, num_vfs, fair / SZ_1M);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!IS_DGFX(gt_to_xe(gt)))
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_lmem(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
}

/**
 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
				   unsigned int num_vfs)
{
	int result = 0;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
		result = result ?: err;
		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
		result = result ?: err;
	}
	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
	result = result ?: err;
	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
	result = result ?: err;

	return result;
}

static const char *exec_quantum_unit(u32 exec_quantum)
{
	return exec_quantum ? "ms" : "(infinity)";
}

static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
				     u32 exec_quantum)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
	if (unlikely(err))
		return err;

	config->exec_quantum = exec_quantum;
	return 0;
}

static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->exec_quantum;
}

/**
 * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
					   u32 exec_quantum)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, exec_quantum,
				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
				      "execution quantum", exec_quantum_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) execution quantum in milliseconds.
 */
u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	u32 exec_quantum;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	exec_quantum = pf_get_exec_quantum(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return exec_quantum;
}

preempt_timeout_unit(u32 preempt_timeout)1723 static const char *preempt_timeout_unit(u32 preempt_timeout)
1724 {
1725 return preempt_timeout ? "us" : "(infinity)";
1726 }
1727
pf_provision_preempt_timeout(struct xe_gt * gt,unsigned int vfid,u32 preempt_timeout)1728 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1729 u32 preempt_timeout)
1730 {
1731 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1732 int err;
1733
1734 err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1735 if (unlikely(err))
1736 return err;
1737
1738 config->preempt_timeout = preempt_timeout;
1739
1740 return 0;
1741 }
1742
pf_get_preempt_timeout(struct xe_gt * gt,unsigned int vfid)1743 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1744 {
1745 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1746
1747 return config->preempt_timeout;
1748 }
1749
1750 /**
1751 * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1752 * @gt: the &xe_gt
1753 * @vfid: the VF identifier
1754 * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1755 *
1756 * This function can only be called on PF.
1757 *
1758 * Return: 0 on success or a negative error code on failure.
1759 */
xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt * gt,unsigned int vfid,u32 preempt_timeout)1760 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1761 u32 preempt_timeout)
1762 {
1763 int err;
1764
1765 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1766 err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1767 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1768
1769 return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1770 xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1771 "preemption timeout", preempt_timeout_unit, err);
1772 }
1773
1774 /**
1775 * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1776 * @gt: the &xe_gt
1777 * @vfid: the VF identifier
1778 *
1779 * This function can only be called on PF.
1780 *
1781 * Return: VF's (or PF's) preemption timeout in microseconds.
1782 */
xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt * gt,unsigned int vfid)1783 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1784 {
1785 u32 preempt_timeout;
1786
1787 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1788 preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1789 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1790
1791 return preempt_timeout;
1792 }
1793
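/*
 * Called on config release; the GuC-side KLVs have already been dropped
 * via pf_send_vf_cfg_reset(), so only the cached values are zeroed here.
 */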
static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	config->exec_quantum = 0;
	config->preempt_timeout = 0;
}

static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
				  enum xe_guc_klv_threshold_index index, u32 value)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
	if (unlikely(err))
		return err;

	config->thresholds[index] = value;

	return 0;
}

static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
			    enum xe_guc_klv_threshold_index index)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->thresholds[index];
}

static const char *threshold_unit(u32 threshold)
{
	return threshold ? "" : "(disabled)";
}

/**
 * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 * @value: requested value (0 means disabled)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);
	const char *name = xe_guc_klv_key_to_string(key);
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_threshold(gt, vfid, index, value);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, value,
				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
				      name, threshold_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 *
 * This function can only be called on PF.
 *
 * Return: value of VF's (or PF's) threshold.
 */
u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index)
{
	u32 value;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	value = pf_get_threshold(gt, vfid, index);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return value;
}

static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

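	/*
	 * MAKE_XE_GUC_KLV_THRESHOLDS_SET() is an X-macro that invokes the
	 * given callback once per defined threshold TAG, so the expansion
	 * below zeroes every entry of config->thresholds[].
	 */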
#define reset_threshold_config(TAG, ...) ({ \
	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0; \
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
#undef reset_threshold_config
}

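/*
 * Release all resources tracked in the VF's config; the caller is expected
 * to hold the PF master mutex.
 */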
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe_gt_is_media_type(gt)) {
		pf_release_vf_config_ggtt(gt, config);
		if (IS_DGFX(xe)) {
			pf_release_vf_config_lmem(gt, config);
			pf_update_vf_lmtt(xe, vfid);
		}
	}
	pf_release_config_ctxs(gt, config);
	pf_release_config_dbs(gt, config);
	pf_reset_config_sched(gt, config);
	pf_reset_config_thresholds(gt, config);
}

/**
 * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @force: force configuration release
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
{
	int err;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err || force)
		pf_release_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
				   vfid, ERR_PTR(err),
				   force ? " but all resources were released anyway!" : "");
	}

	return force ? 0 : err;
}

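/*
 * Re-assigning the node to the VF re-writes its PTEs with the VF's
 * identifier (see xe_ggtt_assign()), which should also scrub any stale
 * mappings left behind by the previous VF session.
 */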
static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
{
	if (xe_ggtt_node_allocated(ggtt_region))
		xe_ggtt_assign(ggtt_region, vfid);
}

static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
{
	struct xe_migrate *m = tile->migrate;
	struct dma_fence *fence;
	int err;

	if (!bo)
		return 0;

	xe_bo_lock(bo, false);
	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
	} else if (!fence) {
		err = -ENOMEM;
	} else {
		long ret = dma_fence_wait_timeout(fence, false, timeout);

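		/* dma_fence_wait_timeout(): >0 jiffies left, 0 timed out, <0 error */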
		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
		dma_fence_put(fence);
		if (!err)
			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
						jiffies_to_msecs(timeout - ret));
	}
	xe_bo_unlock(bo);

	return err;
}

static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	int err = 0;

	/*
	 * Only GGTT and LMEM require clearing by the PF.
	 * GuC doorbell IDs and context IDs do not need any clearing.
	 */
	if (!xe_gt_is_media_type(gt)) {
		pf_sanitize_ggtt(config->ggtt_region, vfid);
		if (IS_DGFX(xe))
			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
	}

	return err;
}

/**
 * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @timeout: maximum timeout to wait for completion in jiffies
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
{
	int err;

	xe_gt_assert(gt, vfid != PFID);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_sanitize_vf_resources(gt, vfid, timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err))
		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
				   vfid, ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @refresh: explicit refresh
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
{
	int err = 0;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_push_vf_cfg(gt, vfid, refresh);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
	}

	return err;
}

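/*
 * Return: 1 if the VF has all mandatory resources provisioned, -ENOKEY if
 * the configuration is only partial, -ENODATA if it is completely empty.
 */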
static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool is_primary = !xe_gt_is_media_type(gt);
	bool valid_ggtt, valid_ctxs, valid_dbs;
	bool valid_any, valid_all;

	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	valid_dbs = pf_get_vf_config_dbs(gt, vfid);

	/* note that GuC doorbells are optional */
	valid_any = valid_ctxs || valid_dbs;
	valid_all = valid_ctxs;

	/* and GGTT/LMEM is configured on primary GT only */
	valid_all = valid_all && valid_ggtt;
	valid_any = valid_any || (valid_ggtt && is_primary);

	if (IS_DGFX(xe)) {
		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);

		valid_any = valid_any || (valid_lmem && is_primary);
		valid_all = valid_all && valid_lmem;
	}

	return valid_all ? 1 : valid_any ? -ENOKEY : -ENODATA;
}

/**
 * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 *
 * This function can only be called on PF.
 *
 * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
 */
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
{
	bool empty;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return empty;
}

/**
 * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
 * @gt: the &xe_gt
 *
 * Any prior configurations pushed to GuC are lost when the GT is reset.
 * Push again all non-empty VF configurations to the GuC.
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	unsigned int fail = 0, skip = 0;

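	/* VF identifiers are 1-based; vfid 0 denotes the PF itself */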
	for (n = 1; n <= total_vfs; n++) {
		if (xe_gt_sriov_pf_config_is_empty(gt, n))
			skip++;
		else if (xe_gt_sriov_pf_config_push(gt, n, false))
			fail++;
	}

	if (fail)
		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
				   fail, total_vfs - skip, str_plural(total_vfs));

	if (fail != total_vfs)
		xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
}

/**
 * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT configuration data for all VFs.
 * VFs without provisioned GGTT are ignored.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!xe_ggtt_node_allocated(config->ggtt_region))
			continue;

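		/* sample output line (illustrative values): VF1: 0x800000-0xffffff (8.00 MiB) */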
		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
				buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
			   n, config->ggtt_region->base.start,
			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
			   buf);
	}

	return 0;
}


/**
 * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC context ID allocations across all VFs.
 * VFs without GuC context IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_ctxs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_ctx,
			   config->begin_ctx + config->num_ctxs - 1,
			   config->num_ctxs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC doorbell IDs allocations across all VFs.
 * VFs without GuC doorbell IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_dbs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_db,
			   config->begin_db + config->num_dbs - 1,
			   config->num_dbs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available for the provisioning.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare, avail, total;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	spare = pf_get_spare_ggtt(gt);
	total = xe_ggtt_print_holes(ggtt, alignment, p);

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);

	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);

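	/* the spare GGTT reserved for the PF is not available for VF provisioning */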
	avail = total > spare ? total - spare : 0;

	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);

	return 0;
}
