1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2015 - 2020 Intel Corporation */
3 #include <linux/delay.h>
4 #include "adf_accel_devices.h"
5 #include "adf_common_drv.h"
6 #include "adf_pf2vf_msg.h"
7
#define ADF_DH895XCC_EP_OFFSET	0x3A000
#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
/* VF2PF interrupt mask bits for VFs 1-16 live at ERRMSK3[24:9].
 * Parenthesize the macro argument so expression arguments expand safely
 * (e.g. a mask built with `|`, which binds looser than `&`).
 */
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) (((vf_mask) & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
/* VF2PF interrupt mask bits for VFs 17-32 live at ERRMSK5[15:0]. */
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) ((vf_mask) >> 16)
13
adf_enable_pf2vf_interrupts(struct adf_accel_dev * accel_dev)14 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
15 {
16 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
17 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
18 void __iomem *pmisc_bar_addr =
19 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
20
21 ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
22 }
23
adf_disable_pf2vf_interrupts(struct adf_accel_dev * accel_dev)24 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
25 {
26 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
27 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
28 void __iomem *pmisc_bar_addr =
29 pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
30
31 ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
32 }
33
/*
 * Enable VF-to-PF interrupts for the VFs selected by @vf_mask.
 * Bit i of @vf_mask selects VF (i + 1); clearing the corresponding
 * ERRMSK bit unmasks that VF's interrupt.
 */
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
				 u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	void __iomem *pmisc_addr = GET_BARS(accel_dev)[bar_id].virt_addr;
	u32 val;

	/* VFs 1 through 16: clear mask bits in ERRMSK3 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		val = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
		val &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, val);
	}

	/* VFs 17 through 32: clear mask bits in ERRMSK5 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		val = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
		val &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, val);
	}
}
57
/*
 * Disable VF-to-PF interrupts for the VFs selected by @vf_mask.
 * Setting the corresponding ERRMSK bit masks that VF's interrupt.
 */
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	void __iomem *pmisc_addr = GET_BARS(accel_dev)[bar_id].virt_addr;
	u32 val;

	/* VFs 1 through 16: set mask bits in ERRMSK3 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		val = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
		val |= ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, val);
	}

	/* VFs 17 through 32: set mask bits in ERRMSK5 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		val = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
		val |= ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, val);
	}
}
80
/*
 * __adf_iov_putmsg() - single attempt to send a message over the PFVF CSR
 * @accel_dev:	Pointer to acceleration device (works on both PF and VF).
 * @msg:	Message to place in the shared CSR.
 * @vf_nr:	Target VF number when running on the PF; ignored on a VF.
 *
 * The PF and each VF share one CSR for messaging in both directions.
 * The sender claims it by writing its direction-specific "in use" pattern
 * together with the interrupt bit; the receiver ACKs by clearing the
 * interrupt bit.  Access to the CSR is serialized by a per-direction mutex.
 *
 * Return: 0 on success, -EBUSY if the CSR is held by the remote function,
 * -EIO if no ACK arrived or the CSR was overwritten (collision).
 */
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_bar_addr =
		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
	u32 val, pf2vf_offset, count = 0;
	u32 local_in_use_mask, local_in_use_pattern;
	u32 remote_in_use_mask, remote_in_use_pattern;
	struct mutex *lock;	/* lock preventing concurrent access of CSR */
	u32 int_bit;
	int ret = 0;

	/* Pick the CSR offset, ownership patterns, interrupt bit and the
	 * serializing lock for the direction we are sending in.
	 */
	if (accel_dev->is_vf) {
		pf2vf_offset = hw_data->get_pf2vf_offset(0);
		lock = &accel_dev->vf.vf2pf_lock;
		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		int_bit = ADF_VF2PF_INT;
	} else {
		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		int_bit = ADF_PF2VF_INT;
	}

	mutex_lock(lock);

	/* Check if the PFVF CSR is in use by remote function */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PFVF CSR in use by remote function\n");
		ret = -EBUSY;
		goto out;
	}

	/* Stamp our "in use" pattern into the message before writing it */
	msg &= ~local_in_use_mask;
	msg |= local_in_use_pattern;

	/* Attempt to get ownership of the PFVF CSR */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);

	/* Wait for confirmation from remote func it received the message */
	do {
		msleep(ADF_IOV_MSG_ACK_DELAY);
		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));

	if (val & int_bit) {
		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
		/* Clear the interrupt bit locally so the collision check
		 * below compares only the payload; ret stays -EIO either way.
		 */
		val &= ~int_bit;
		ret = -EIO;
	}

	/* If the CSR no longer holds our message, the remote side wrote
	 * concurrently and both messages must be considered lost.
	 */
	if (val != msg) {
		dev_dbg(&GET_DEV(accel_dev),
			"Collision - PFVF CSR overwritten by remote function\n");
		ret = -EIO;
		goto out;
	}

	/* Finished with the PFVF CSR; relinquish it and leave msg in CSR */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
	mutex_unlock(lock);
	return ret;
}
154
155 /**
156 * adf_iov_putmsg() - send PFVF message
157 * @accel_dev: Pointer to acceleration device.
158 * @msg: Message to send
159 * @vf_nr: VF number to which the message will be sent if on PF, ignored
160 * otherwise
161 *
162 * Function sends a message through the PFVF channel
163 *
164 * Return: 0 on success, error code otherwise.
165 */
adf_iov_putmsg(struct adf_accel_dev * accel_dev,u32 msg,u8 vf_nr)166 int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
167 {
168 u32 count = 0;
169 int ret;
170
171 do {
172 ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
173 if (ret)
174 msleep(ADF_IOV_MSG_RETRY_DELAY);
175 } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
176
177 return ret;
178 }
179
/*
 * adf_vf2pf_req_hndl() - handle a VF-to-PF request on the PF side
 * @vf_info:	Per-VF state of the VF that raised the interrupt.
 *
 * Reads the message from the shared PFVF CSR, ACKs it by clearing the
 * VF2PF interrupt bit, dispatches on the message type and, where the
 * protocol requires it, sends a response back via adf_iov_putmsg().
 * The VF's interrupt (masked by the caller) is re-enabled on the normal
 * exit path.
 */
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;

	/* Read message from the VF */
	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
	if (!(msg & ADF_VF2PF_INT)) {
		dev_info(&GET_DEV(accel_dev),
			 "Spurious VF2PF interrupt, msg %X. Ignored\n", msg);
		goto out;
	}

	/* To ACK, clear the VF2PFINT bit */
	msg &= ~ADF_VF2PF_INT;
	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) VF2PF messages */
		goto err;

	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
		{
		/* VF reports its compat version; reply with ours plus a
		 * compatible / unknown / incompatible verdict.
		 */
		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;

		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPATIBILITY_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));

		dev_dbg(&GET_DEV(accel_dev),
			"Compatibility Version Request from VF%d vers=%u\n",
			vf_nr + 1, vf_compat_ver);

		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) incompatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
			/* VF is newer than PF; let the VF decide (UNKNOWN) */
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) compat with PF (vers %d) unkn.\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else {
			dev_dbg(&GET_DEV(accel_dev),
				"VF (vers %d) compatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_COMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		}
		}
		break;
	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
		dev_dbg(&GET_DEV(accel_dev),
			"Legacy VersionRequest received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPATIBILITY_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
		resp |= ADF_PF2VF_VF_COMPATIBLE <<
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		/* Set legacy major and minor version num */
		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
			1 << ADF_PF2VF_MINORVERSION_SHIFT;
		break;
	case ADF_VF2PF_MSGTYPE_INIT:
		{
		/* Track that this VF is up; no response needed */
		dev_dbg(&GET_DEV(accel_dev),
			"Init message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = true;
		}
		break;
	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
		{
		/* Track that this VF is going down; no response needed */
		dev_dbg(&GET_DEV(accel_dev),
			"Shutdown message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = false;
		}
		break;
	default:
		goto err;
	}

	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");

out:
	/* re-enable interrupt on PF from this VF */
	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
	return;
err:
	/* NOTE(review): this path returns without re-enabling the VF2PF
	 * interrupt for this VF (unlike the out: path above), so further
	 * messages from a misbehaving VF are ignored — confirm intentional.
	 */
	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
		vf_nr + 1, msg);
}
287
adf_pf2vf_notify_restarting(struct adf_accel_dev * accel_dev)288 void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
289 {
290 struct adf_accel_vf_info *vf;
291 u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
292 (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
293 int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
294
295 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
296 if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
297 dev_err(&GET_DEV(accel_dev),
298 "Failed to send restarting msg to VF%d\n", i);
299 }
300 }
301
adf_vf2pf_request_version(struct adf_accel_dev * accel_dev)302 static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
303 {
304 unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
305 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
306 u32 msg = 0;
307 int ret;
308
309 msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
310 msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
311 msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
312 BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
313
314 reinit_completion(&accel_dev->vf.iov_msg_completion);
315
316 /* Send request from VF to PF */
317 ret = adf_iov_putmsg(accel_dev, msg, 0);
318 if (ret) {
319 dev_err(&GET_DEV(accel_dev),
320 "Failed to send Compatibility Version Request.\n");
321 return ret;
322 }
323
324 /* Wait for response */
325 if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
326 timeout)) {
327 dev_err(&GET_DEV(accel_dev),
328 "IOV request/response message timeout expired\n");
329 return -EIO;
330 }
331
332 /* Response from PF received, check compatibility */
333 switch (accel_dev->vf.compatible) {
334 case ADF_PF2VF_VF_COMPATIBLE:
335 break;
336 case ADF_PF2VF_VF_COMPAT_UNKNOWN:
337 /* VF is newer than PF and decides whether it is compatible */
338 if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
339 break;
340 fallthrough;
341 case ADF_PF2VF_VF_INCOMPATIBLE:
342 dev_err(&GET_DEV(accel_dev),
343 "PF (vers %d) and VF (vers %d) are not compatible\n",
344 accel_dev->vf.pf_version,
345 ADF_PFVF_COMPATIBILITY_VERSION);
346 return -EINVAL;
347 default:
348 dev_err(&GET_DEV(accel_dev),
349 "Invalid response from PF; assume not compatible\n");
350 return -EINVAL;
351 }
352 return ret;
353 }
354
/**
 * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Unmasks the PF-to-VF interrupt on this VF, then performs the
 * compatibility version handshake with the PF.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	int ret;

	adf_enable_pf2vf_interrupts(accel_dev);
	ret = adf_vf2pf_request_version(accel_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
368