// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_gt.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_mmio.h"
#include "xe_pm.h"

static void pf_worker_restart_func(struct work_struct *w);

/*
 * VF's metadata is maintained in the flexible array where:
 *   - entry [0] contains metadata for the PF (only if applicable),
 *   - entries [1..n] contain metadata for VF1..VFn::
 *
 *       <--------------------------- 1 + total_vfs ----------->
 *      +-------+-------+-------+-----------------------+-------+
 *      |   0   |   1   |   2   |                       |   n   |
 *      +-------+-------+-------+-----------------------+-------+
 *      |  PF   |  VF1  |  VF2  |      ...     ...      |  VFn  |
 *      +-------+-------+-------+-----------------------+-------+
 */
static int pf_alloc_metadata(struct xe_gt *gt)
{
	unsigned int num_vfs = xe_gt_sriov_pf_get_totalvfs(gt);

	gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,
					sizeof(*gt->sriov.pf.vfs), GFP_KERNEL);
	if (!gt->sriov.pf.vfs)
		return -ENOMEM;

	return 0;
}
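/*
 * Illustrative sketch (the helper name below is hypothetical, not part of
 * the driver): with the layout above, a VF identifier is usable directly
 * as the array index, with vfid == 0 selecting the PF's own entry. Note
 * that drmm_kcalloc() ties the array's lifetime to the DRM device, so no
 * explicit free is needed:
 *
 *	static struct xe_gt_sriov_metadata *
 *	pf_pick_vf_metadata(struct xe_gt *gt, unsigned int vfid)
 *	{
 *		xe_gt_assert(gt, vfid <= xe_gt_sriov_pf_get_totalvfs(gt));
 *		return &gt->sriov.pf.vfs[vfid];
 *	}
 */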

static void pf_init_workers(struct xe_gt *gt)
{
	INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
}

/**
 * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on the PF.
 * @gt: the &xe_gt to initialize
 *
 * Early initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
{
	int err;

	err = pf_alloc_metadata(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_service_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_control_init(gt);
	if (err)
		return err;

	pf_init_workers(gt);

	return 0;
}

static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) == 1200;
}

static void pf_enable_ggtt_guest_update(struct xe_gt *gt)
{
	xe_mmio_write32(gt, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
}

/**
 * xe_gt_sriov_pf_init_hw - Initialize SR-IOV hardware support.
 * @gt: the &xe_gt to initialize
 *
 * On some platforms the PF must explicitly enable VF's access to the GGTT.
 */
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
	if (pf_needs_enable_ggtt_guest_update(gt_to_xe(gt)))
		pf_enable_ggtt_guest_update(gt);

	xe_gt_sriov_pf_service_update(gt);
}

static u32 pf_get_vf_regs_stride(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
}

static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
{
	struct xe_reg pf_reg = vf_reg;

	pf_reg.vf = 0;
	pf_reg.addr += stride * vfid;

	return pf_reg;
}
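/*
 * Worked example (offsets illustrative only): a VF-relative register at
 * offset A has per-VF copies that the PF can address directly. For
 * vfid == 3 on a graphics version 12.00 platform (stride 0x1000) the PF
 * alias is at A + 0x3000; on newer platforms (stride 0x400) it is at
 * A + 0xc00. Clearing pf_reg.vf marks the result as an ordinary,
 * non-VF-relative register.
 */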

static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
{
	u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
	struct xe_reg scratch;
	int n, count;

	if (xe_gt_is_media_type(gt)) {
		count = MED_VF_SW_FLAG_COUNT;
		for (n = 0; n < count; n++) {
			scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
			xe_mmio_write32(gt, scratch, 0);
		}
	} else {
		count = VF_SW_FLAG_COUNT;
		for (n = 0; n < count; n++) {
			scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
			xe_mmio_write32(gt, scratch, 0);
		}
	}
}
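/*
 * Background (an assumption based on the register names): the
 * VF_SW_FLAG()/MED_VF_SW_FLAG() registers appear to be software-defined
 * scratch space shared between the PF and a VF. Zeroing every per-VF copy
 * keeps values written by a previous owner of this VFID from leaking to
 * the next one.
 */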

/**
 * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	pf_clear_vf_scratch_regs(gt, vfid);
}

static void pf_cancel_restart(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	if (cancel_work_sync(&gt->sriov.pf.workers.restart))
		xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
}

/**
 * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support.
 * @gt: the &xe_gt
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
{
	pf_cancel_restart(gt);
}
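/*
 * Ordering note: cancel_work_sync() both removes a queued restart and
 * waits for an already-running one to finish, so once stop_prepare
 * returns, the restart worker cannot race with whatever teardown the
 * caller performs next.
 */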

static void pf_restart(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_pm_runtime_get(xe);
	xe_gt_sriov_pf_config_restart(gt);
	xe_gt_sriov_pf_control_restart(gt);
	xe_pm_runtime_put(xe);

	xe_gt_sriov_dbg(gt, "restart completed\n");
}

static void pf_worker_restart_func(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);

	pf_restart(gt);
}

static void pf_queue_restart(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))
		xe_gt_sriov_dbg(gt, "restart already in queue!\n");
}

/**
 * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
 * @gt: the &xe_gt
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
	pf_queue_restart(gt);
}
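/*
 * A sketch of the whole restart path (a summary of the code above; the GT
 * reset handler as the caller is an assumption):
 *
 *	GT reset
 *	  -> xe_gt_sriov_pf_restart(gt)
 *	       -> pf_queue_restart(gt)          queued on xe->sriov.wq
 *	            pf_worker_restart_func(w)   worker callback
 *	              -> pf_restart(gt)         runs with a runtime PM ref
 *	                   xe_gt_sriov_pf_config_restart(gt);
 *	                   xe_gt_sriov_pf_control_restart(gt);
 *
 * queue_work() returns false when the work item is already pending, so
 * back-to-back GT resets coalesce into a single restart pass.
 */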