1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2015 - 2020 Intel Corporation */
3 #include <linux/delay.h>
4 #include "adf_accel_devices.h"
5 #include "adf_common_drv.h"
6 #include "adf_pf2vf_msg.h"
7
/* DH895xCC error-mask CSRs gating VF2PF interrupt sources. */
#define ADF_DH895XCC_EP_OFFSET	0x3A000
#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
/*
 * ERRMSK3 holds the mask bits for VFs 1-16 (vf_mask[15:0]) starting at
 * bit 9; ERRMSK5 holds the mask bits for VFs 17-32 (vf_mask[31:16]).
 * The argument is fully parenthesized so expression arguments
 * (e.g. "a | b") expand correctly.
 */
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask)	(((vf_mask) & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask)	((vf_mask) >> 16)
13
adf_enable_pf2vf_interrupts(struct adf_accel_dev * accel_dev)14 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
15 {
16 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
17 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
18 void __iomem *pmisc_bar_addr =
19 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
20
21 ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
22 }
23
adf_disable_pf2vf_interrupts(struct adf_accel_dev * accel_dev)24 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
25 {
26 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
27 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
28 void __iomem *pmisc_bar_addr =
29 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
30
31 ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
32 }
33
/*
 * Unmask VF2PF interrupt sources for the VFs selected in vf_mask.
 * Bits [15:0] map to VFs 1-16 (ERRMSK3), bits [31:16] to VFs 17-32
 * (ERRMSK5); a cleared mask bit means the interrupt is enabled.
 */
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
				 u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *csr = misc_bar->virt_addr;
	u32 errmsk;

	/* VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		errmsk = ADF_CSR_RD(csr, ADF_DH895XCC_ERRMSK3) &
			 ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(csr, ADF_DH895XCC_ERRMSK3, errmsk);
	}

	/* VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		errmsk = ADF_CSR_RD(csr, ADF_DH895XCC_ERRMSK5) &
			 ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(csr, ADF_DH895XCC_ERRMSK5, errmsk);
	}
}
57
/*
 * Mask VF2PF interrupt sources for the VFs selected in vf_mask.
 * Counterpart of adf_enable_vf2pf_interrupts(): setting a mask bit in
 * ERRMSK3/ERRMSK5 disables the corresponding VF's interrupt.
 */
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *csr = misc_bar->virt_addr;
	u32 errmsk;

	/* VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		errmsk = ADF_CSR_RD(csr, ADF_DH895XCC_ERRMSK3);
		errmsk |= ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(csr, ADF_DH895XCC_ERRMSK3, errmsk);
	}

	/* VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		errmsk = ADF_CSR_RD(csr, ADF_DH895XCC_ERRMSK5);
		errmsk |= ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(csr, ADF_DH895XCC_ERRMSK5, errmsk);
	}
}
80
/*
 * __adf_iov_putmsg() - single attempt to send a message over the shared CSR
 *
 * Implements one round of the PF<->VF mailbox handshake on the shared
 * PF2VF CSR: claim ownership with the local IN_USE pattern, detect a
 * concurrent claim by the remote side, ring the remote's interrupt bit,
 * then poll for the ACK (remote clears the interrupt bit).
 *
 * The same routine serves both directions; accel_dev->is_vf selects which
 * IN_USE pattern and interrupt bit mean "local" vs "remote".
 *
 * Return: 0 on success, -EBUSY if the CSR is owned by the remote function
 * (caller may retry), -EIO if no ACK was received in time.
 */
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
	u32 val, pf2vf_offset, count = 0;
	u32 local_in_use_mask, local_in_use_pattern;
	u32 remote_in_use_mask, remote_in_use_pattern;
	struct mutex *lock; /* lock preventing concurrent access of CSR */
	u32 int_bit;
	int ret = 0;

	if (accel_dev->is_vf) {
		/* VF side: one mailbox towards the PF, hence index 0 */
		pf2vf_offset = hw_data->get_pf2vf_offset(0);
		lock = &accel_dev->vf.vf2pf_lock;
		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		int_bit = ADF_VF2PF_INT;
	} else {
		/* PF side: one mailbox per VF */
		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		int_bit = ADF_PF2VF_INT;
	}

	mutex_lock(lock);

	/* Check if PF2VF CSR is in use by remote function */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote function\n");
		ret = -EBUSY;
		goto out;
	}

	/* Attempt to get ownership of PF2VF CSR */
	msg &= ~local_in_use_mask;
	msg |= local_in_use_pattern;
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);

	/* Wait in case remote func also attempting to get ownership */
	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);

	/*
	 * Re-read: if our IN_USE pattern was overwritten, both sides wrote
	 * at nearly the same time and the remote won the race.
	 */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & local_in_use_mask) != local_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote - collision detected\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * This function now owns the PV2VF CSR. The IN_USE_BY pattern must
	 * remain in the PF2VF CSR for all writes including ACK from remote
	 * until this local function relinquishes the CSR. Send the message
	 * by interrupting the remote.
	 */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);

	/* Wait for confirmation from remote func it received the message */
	do {
		msleep(ADF_IOV_MSG_ACK_DELAY);
		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));

	if (val & int_bit) {
		/* Remote never cleared the interrupt bit: no ACK */
		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
		val &= ~int_bit;
		ret = -EIO;
	}

	/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
	mutex_unlock(lock);
	return ret;
}
165
166 /**
167 * adf_iov_putmsg() - send PF2VF message
168 * @accel_dev: Pointer to acceleration device.
169 * @msg: Message to send
170 * @vf_nr: VF number to which the message will be sent
171 *
 * Function sends a message from the PF to a VF
173 *
174 * Return: 0 on success, error code otherwise.
175 */
/**
 * adf_iov_putmsg() - send PF2VF message
 * @accel_dev: Pointer to acceleration device.
 * @msg: Message to send
 * @vf_nr: VF number to which the message will be sent
 *
 * Function sends a message from the PF to a VF, retrying a bounded number
 * of times when the shared CSR is busy or the ACK is not received.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	u32 attempts = 0;
	int ret;

	for (;;) {
		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
		if (!ret)
			break;
		/* Back off before retrying; also sleeps on the final failure */
		msleep(ADF_IOV_MSG_RETRY_DELAY);
		if (attempts++ >= ADF_IOV_MSG_MAX_RETRIES)
			break;
	}

	return ret;
}
189
/*
 * adf_vf2pf_req_hndl() - handle a request message received from a VF
 * @vf_info: per-VF bookkeeping for the VF that raised the interrupt
 *
 * Reads the message from the VF's mailbox CSR, ACKs it by clearing the
 * VF2PF interrupt bit, then dispatches on the message type. For version
 * requests a response is composed and sent back via adf_iov_putmsg();
 * INIT/SHUTDOWN only toggle vf_info->init. Finally the VF's interrupt
 * source is re-enabled (it is presumably masked by the ISR before this
 * handler runs — confirm against the interrupt code).
 */
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;

	/* Read message from the VF */
	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));

	/* To ACK, clear the VF2PFINT bit */
	msg &= ~ADF_VF2PF_INT;
	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) VF2PF messages */
		goto err;

	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
	{
		/* VF reports its compatibility version in the upper bits */
		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;

		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			(ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			 ADF_PF2VF_MSGTYPE_SHIFT) |
			(ADF_PFVF_COMPATIBILITY_VERSION <<
			 ADF_PF2VF_VERSION_RESP_VERS_SHIFT));

		dev_dbg(&GET_DEV(accel_dev),
			"Compatibility Version Request from VF%d vers=%u\n",
			vf_nr + 1, vf_compat_ver);

		/*
		 * Three outcomes: VF too old -> incompatible; VF newer than
		 * the PF -> unknown (VF decides); otherwise compatible.
		 */
		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) incompatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) compat with PF (vers %d) unkn.\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else {
			dev_dbg(&GET_DEV(accel_dev),
				"VF (vers %d) compatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_COMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		}
	}
		break;
	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
		/* Legacy request: always answer "compatible", version 1.1 */
		dev_dbg(&GET_DEV(accel_dev),
			"Legacy VersionRequest received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			(ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			 ADF_PF2VF_MSGTYPE_SHIFT) |
			(ADF_PFVF_COMPATIBILITY_VERSION <<
			 ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
		resp |= ADF_PF2VF_VF_COMPATIBLE <<
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		/* Set legacy major and minor version num */
		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
			1 << ADF_PF2VF_MINORVERSION_SHIFT;
		break;
	case ADF_VF2PF_MSGTYPE_INIT:
	{
		/* VF driver came up; no response needed */
		dev_dbg(&GET_DEV(accel_dev),
			"Init message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = true;
	}
		break;
	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
	{
		/* VF driver went down; no response needed */
		dev_dbg(&GET_DEV(accel_dev),
			"Shutdown message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = false;
	}
		break;
	default:
		goto err;
	}

	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");

	/* re-enable interrupt on PF from this VF */
	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
	return;
err:
	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
		vf_nr + 1, msg);
}
291
adf_pf2vf_notify_restarting(struct adf_accel_dev * accel_dev)292 void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
293 {
294 struct adf_accel_vf_info *vf;
295 u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
296 (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
297 int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
298
299 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
300 if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
301 dev_err(&GET_DEV(accel_dev),
302 "Failed to send restarting msg to VF%d\n", i);
303 }
304 }
305
adf_vf2pf_request_version(struct adf_accel_dev * accel_dev)306 static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
307 {
308 unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
309 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
310 u32 msg = 0;
311 int ret;
312
313 msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
314 msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
315 msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
316 BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
317
318 reinit_completion(&accel_dev->vf.iov_msg_completion);
319
320 /* Send request from VF to PF */
321 ret = adf_iov_putmsg(accel_dev, msg, 0);
322 if (ret) {
323 dev_err(&GET_DEV(accel_dev),
324 "Failed to send Compatibility Version Request.\n");
325 return ret;
326 }
327
328 /* Wait for response */
329 if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
330 timeout)) {
331 dev_err(&GET_DEV(accel_dev),
332 "IOV request/response message timeout expired\n");
333 return -EIO;
334 }
335
336 /* Response from PF received, check compatibility */
337 switch (accel_dev->vf.compatible) {
338 case ADF_PF2VF_VF_COMPATIBLE:
339 break;
340 case ADF_PF2VF_VF_COMPAT_UNKNOWN:
341 /* VF is newer than PF and decides whether it is compatible */
342 if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
343 break;
344 fallthrough;
345 case ADF_PF2VF_VF_INCOMPATIBLE:
346 dev_err(&GET_DEV(accel_dev),
347 "PF (vers %d) and VF (vers %d) are not compatible\n",
348 accel_dev->vf.pf_version,
349 ADF_PFVF_COMPATIBILITY_VERSION);
350 return -EINVAL;
351 default:
352 dev_err(&GET_DEV(accel_dev),
353 "Invalid response from PF; assume not compatible\n");
354 return -EINVAL;
355 }
356 return ret;
357 }
358
359 /**
 * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
361 *
362 * @accel_dev: Pointer to acceleration device virtual function.
363 *
364 * Return: 0 on success, error code otherwise.
365 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Unmask PF2VF interrupts before negotiating the protocol version */
	adf_enable_pf2vf_interrupts(accel_dev);
	ret = adf_vf2pf_request_version(accel_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
372