// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/types.h>

#include "fbnic.h"
#include "fbnic_tlv.h"

static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
				int desc_idx, u64 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* Write the upper 32b and then the lower 32b. This way the FW
	 * can read lower, upper, lower to verify that the state of the
	 * descriptor wasn't changed mid-transaction.
	 */
	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}

static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
					int desc_idx, u32 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* For initialization we write the lower 32b of the descriptor first.
	 * This way we can set the state to mark it invalid before we clear the
	 * upper 32b.
	 */
	fw_wr32(fbd, desc_offset, desc);
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset + 1, 0);
}

static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
	u64 desc;

	desc = fw_rd32(fbd, desc_offset);
	desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;

	return desc;
}

static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int desc_idx;

	/* Disable DMA transactions from the device,
	 * and flush any transactions triggered during cleaning
	 */
	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
		break;
	}

	wrfl(fbd);

	/* Initialize first descriptor to all 0s. Doing this gives us a
	 * solid stop for the firmware to hit when it is done looping
	 * through the ring.
	 */
	__fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);

	/* We then fill the rest of the ring, starting at the end and moving
	 * back toward descriptor 0, with skip descriptors that have no
	 * length or address, telling the firmware that it can skip them
	 * and just move past them to the one we initialized to 0.
	 */
	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
		__fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
					    FBNIC_IPC_MBX_DESC_FW_CMPL |
					    FBNIC_IPC_MBX_DESC_HOST_CMPL);
}

void fbnic_mbx_init(struct fbnic_dev *fbd)
{
	int i;

	/* Initialize lock to protect Tx ring */
	spin_lock_init(&fbd->fw_tx_lock);

	/* Reinitialize mailbox memory */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));

	/* Do not auto-clear the FW mailbox interrupt, let SW clear it */
	wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));

	/* Clear any stale causes in vector 0 as that is used for doorbell */
	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_reset_desc_ring(fbd, i);
}

static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
			     struct fbnic_tlv_msg *msg, u16 length, u8 eom)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	u8 tail = mbx->tail;
	dma_addr_t addr;
	int direction;

	if (!mbx->ready || !fbnic_fw_present(fbd))
		return -ENODEV;

	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;

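	/* The ring is full once advancing tail would land on head; one
	 * slot is always left unused so that head == tail means empty.
	 */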
	if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
		return -EBUSY;

	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
	if (dma_mapping_error(fbd->dev, addr))
		return -ENOSPC;

	mbx->buf_info[tail].msg = msg;
	mbx->buf_info[tail].addr = addr;

	mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;

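	/* Zero the lower 32b of the descriptor at the new tail so the
	 * firmware has an invalid descriptor to stop on before we publish
	 * the one we just filled.
	 */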
	fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);

	__fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
			    FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
			    (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
			    (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
			    FBNIC_IPC_MBX_DESC_HOST_CMPL);

	return 0;
}

static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
					 int desc_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	int direction;

	if (!mbx->buf_info[desc_idx].msg)
		return;

	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;
	dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
			 PAGE_SIZE, direction);

	free_page((unsigned long)mbx->buf_info[desc_idx].msg);
	mbx->buf_info[desc_idx].msg = NULL;
}

static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int i;

	fbnic_mbx_reset_desc_ring(fbd, mbx_idx);

	for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
		fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
}

void fbnic_mbx_clean(struct fbnic_dev *fbd)
{
	int i;

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_clean_desc_ring(fbd, i);
}

#define FBNIC_MBX_MAX_PAGE_SIZE	FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
#define FBNIC_RX_PAGE_SIZE	min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)

static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
	int err = 0;

	/* Do nothing if mailbox is not ready, or we already have pages on
	 * the ring that can be used by the firmware
	 */
	if (!rx_mbx->ready)
		return -ENODEV;

	/* Fill all but one of the unused descriptors in the Rx queue. */
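	/* head and tail are ring indices in [0, FBNIC_IPC_MBX_DESC_LEN);
	 * the math below estimates how many descriptors can be posted
	 * while keeping one slot open. fbnic_mbx_map_msg() returning
	 * -EBUSY ends the loop early if the ring fills first.
	 */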
	count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
	while (!err && count--) {
		struct fbnic_tlv_msg *msg;

		msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
							      __GFP_NOWARN);
		if (!msg) {
			err = -ENOMEM;
			break;
		}

		err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
					FBNIC_RX_PAGE_SIZE, 0);
		if (err)
			free_page((unsigned long)msg);
	}

	return err;
}

static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
				 struct fbnic_tlv_msg *msg)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);

	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);

	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return err;
}

static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
	u8 head = tx_mbx->head;
	u64 desc;

	while (head != tx_mbx->tail) {
		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	tx_mbx->head = head;
}

/**
 * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
 * @fbd: FBNIC device structure
 * @msg_type: ENUM value indicating message type to send
 *
 * Return:
 * One of the following values:
 * -EOPNOTSUPP: Not an ASIC, so mailbox is not supported
 * -ENODEV: Device I/O error
 * -ENOMEM: Failed to allocate message
 * -EBUSY: No space in mailbox
 * -ENOSPC: DMA mapping failed
 *
 * This function sends a single TLV header indicating the host wants to take
 * some action. However, there are no other side effects, which means that any
 * response will need to be caught via a completion if this action is
 * expected to kick off a resultant action.
 */
static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(msg_type);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		free_page((unsigned long)msg);

	return err;
}

static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];

	mbx->ready = true;

	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Enable DMA writes from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);

		/* Make sure we have a page for the FW to write to */
		fbnic_mbx_alloc_rx_msgs(fbd);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Enable DMA reads from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
		break;
	}
}

static bool fbnic_mbx_event(struct fbnic_dev *fbd)
{
	/* We only need to do this on the first interrupt following a reset.
	 * This primes the mailbox so that we will have cleared all the
	 * skip descriptors.
	 */
	if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
		return false;

	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	return true;
}

/**
 * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
 * to FW mailbox
 *
 * @fbd: FBNIC device structure
 * @take_ownership: take/release the ownership
 *
 * Return: zero on success, negative value on failure
 *
 * Notifies the firmware that the driver either takes ownership of the NIC
 * (when @take_ownership is true) or releases it.
 */
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
	if (!msg)
		return -ENOMEM;

	if (take_ownership) {
		err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	/* Initialize heartbeat, set last response to 1 second in the past
	 * so that we will trigger a timeout if the firmware doesn't respond
	 */
	fbd->last_heartbeat_response = req_time - HZ;

	fbd->last_heartbeat_request = req_time;

	/* Set heartbeat detection based on if we are taking ownership */
	fbd->fw_heartbeat_enabled = take_ownership;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}

static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
	FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
				    struct fbnic_tlv_msg *attr, int len)
{
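	/* hdr.len counts 32b words including the attribute header itself,
	 * so drop one word for the header to get the member count.
	 */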
	int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
	struct fbnic_tlv_msg *mac_results[8];
	int err, i = 0;

	/* Make sure we have enough room to process all the MAC addresses */
	if (len > 8)
		return -ENOSPC;

	/* Parse the array */
	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
					 fbnic_fw_cap_resp_index,
					 FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
	if (err)
		return err;

	/* Copy results into MAC addr array */
	for (i = 0; i < len && mac_results[i]; i++)
		fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);

	/* Zero remaining unused addresses */
	while (i < len)
		eth_zero_addr(bmc_mac_addr[i++]);

	return 0;
}

static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	u32 active_slot = 0, all_multi = 0;
	struct fbnic_dev *fbd = opaque;
	u32 speed = 0, fec = 0;
	size_t commit_size = 0;
	bool bmc_present;
	int err;

	get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION,
			    fbd->fw_cap.running.mgmt.version);

	if (!fbd->fw_cap.running.mgmt.version)
		return -EINVAL;

	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
		char running_ver[FBNIC_FW_VER_MAX_SIZE];

		fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
				    running_ver);
		dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
			running_ver,
			MIN_FW_MAJOR_VERSION,
			MIN_FW_MINOR_VERSION,
			MIN_FW_BUILD_VERSION);
		/* Disable TX mailbox to prevent card use until firmware is
		 * updated.
		 */
		fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
		return -EINVAL;
	}

	get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size,
			  fbd->fw_cap.running.mgmt.commit,
			  FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
	if (!commit_size)
		dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");

	get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION,
			    fbd->fw_cap.stored.mgmt.version);
	get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size,
			  fbd->fw_cap.stored.mgmt.commit,
			  FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION,
			    fbd->fw_cap.running.bootloader.version);
	get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size,
			  fbd->fw_cap.running.bootloader.commit,
			  FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION,
			    fbd->fw_cap.stored.bootloader.version);
	get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size,
			  fbd->fw_cap.stored.bootloader.commit,
			  FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION,
			    fbd->fw_cap.stored.undi.version);
	get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size,
			  fbd->fw_cap.stored.undi.commit,
			  FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot);
	fbd->fw_cap.active_slot = active_slot;

	get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed);
	get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec);
	fbd->fw_cap.link_speed = speed;
	fbd->fw_cap.link_fec = fec;

	bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
	if (bmc_present) {
		struct fbnic_tlv_msg *attr;

		attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
		if (!attr)
			return -EINVAL;

		err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
					       attr, 4);
		if (err)
			return err;

		get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi);
	} else {
		memset(fbd->fw_cap.bmc_mac_addr, 0,
		       sizeof(fbd->fw_cap.bmc_mac_addr));
	}

	fbd->fw_cap.bmc_present = bmc_present;

	if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
		fbd->fw_cap.all_multi = all_multi;

	return 0;
}

static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_ownership_resp(void *opaque,
					 struct fbnic_tlv_msg **results)
{
	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;

	/* Count the ownership response as a heartbeat reply */
	fbd->last_heartbeat_response = jiffies;

	return 0;
}

static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_heartbeat_resp(void *opaque,
					 struct fbnic_tlv_msg **results)
{
	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;

	fbd->last_heartbeat_response = jiffies;

	return 0;
}

static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	fbd->last_heartbeat_request = req_time;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}

static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
{
	unsigned long last_response = fbd->last_heartbeat_response;
	unsigned long last_request = fbd->last_heartbeat_request;

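	/* The heartbeat is current if the last response is at least as
	 * recent as the last request we sent.
	 */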
	return !time_before(last_response, last_request);
}

int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
{
	int err = -ETIMEDOUT;
	int attempts = 50;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	while (attempts--) {
		msleep(200);
		if (poll)
			fbnic_mbx_poll(fbd);

		if (!fbnic_fw_heartbeat_current(fbd))
			continue;

		/* Place new message on mailbox to elicit a response */
		err = fbnic_fw_xmit_heartbeat_message(fbd);
		if (err)
			dev_warn(fbd->dev,
				 "Failed to send heartbeat message: %d\n",
				 err);
		break;
	}

	return err;
}

void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
{
	unsigned long last_request = fbd->last_heartbeat_request;
	int err;

	/* Do not check heartbeat or send another request until the current
	 * period has expired. Otherwise we might start spamming requests.
	 */
	if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
		return;

	/* We already reported no mailbox. Wait for it to come back */
	if (!fbd->fw_heartbeat_enabled)
		return;

	/* Was the last heartbeat response a long time ago? */
	if (!fbnic_fw_heartbeat_current(fbd)) {
		dev_warn(fbd->dev,
			 "Firmware did not respond to heartbeat message\n");
		fbd->fw_heartbeat_enabled = false;
	}

	/* Place new message on mailbox to elicit a response */
	err = fbnic_fw_xmit_heartbeat_message(fbd);
	if (err)
		dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}

static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
	FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
			 fbnic_fw_parse_cap_resp),
	FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
			 fbnic_fw_parse_ownership_resp),
	FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
			 fbnic_fw_parse_heartbeat_resp),
	FBNIC_TLV_MSG_ERROR
};

static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 head = rx_mbx->head;
	u64 desc, length;

	while (head != rx_mbx->tail) {
		struct fbnic_tlv_msg *msg;
		int err;

		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
				 PAGE_SIZE, DMA_FROM_DEVICE);

		msg = rx_mbx->buf_info[head].msg;

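		/* Length is reported by the FW in the descriptor and is in
		 * bytes; it is validated against the page size below.
		 */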
		length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);

		/* Ignore NULL mailbox descriptors */
		if (!length)
			goto next_page;

		/* Report descriptors with length greater than page size */
		if (length > PAGE_SIZE) {
			dev_warn(fbd->dev,
				 "Invalid mailbox descriptor length: %lld\n",
				 length);
			goto next_page;
		}

		if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
			dev_warn(fbd->dev, "Mailbox message length mismatch\n");

		/* If parsing fails dump contents of message to dmesg */
		err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
		if (err) {
			dev_warn(fbd->dev, "Unable to process message: %d\n",
				 err);
			print_hex_dump(KERN_WARNING, "fbnic:",
				       DUMP_PREFIX_OFFSET, 16, 2,
				       msg, length, true);
		}

		dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:

		free_page((unsigned long)rx_mbx->buf_info[head].msg);
		rx_mbx->buf_info[head].msg = NULL;

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	rx_mbx->head = head;

	/* Make sure we have at least one page for the FW to write to */
	fbnic_mbx_alloc_rx_msgs(fbd);
}

void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
	fbnic_mbx_event(fbd);

	fbnic_mbx_process_tx_msgs(fbd);
	fbnic_mbx_process_rx_msgs(fbd);
}

int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;
	int err, i;

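	/* Poll in 20ms steps for up to 10 seconds waiting for the first
	 * interrupt event from the firmware.
	 */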
	do {
		if (!time_is_after_jiffies(timeout))
			return -ETIMEDOUT;

		/* Force the firmware to trigger an interrupt response to
		 * avoid the mailbox getting stuck closed if the interrupt
		 * is reset.
		 */
		fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);

		/* Immediate fail if BAR4 went away */
		if (!fbnic_fw_present(fbd))
			return -ENODEV;

		msleep(20);
	} while (!fbnic_mbx_event(fbd));

	/* FW has shown signs of life. Enable DMA and start Tx/Rx */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_init_desc_ring(fbd, i);

	/* Request an update from the firmware. This should overwrite
	 * mgmt.version once we get the actual version from the firmware
	 * in the capabilities request message.
	 */
	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
	if (err)
		goto clean_mbx;

	/* Use "1" to indicate we entered the state waiting for a response */
	fbd->fw_cap.running.mgmt.version = 1;

	return 0;
clean_mbx:
	/* Cleanup Rx buffers and disable mailbox */
	fbnic_mbx_clean(fbd);
	return err;
}

void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;
	struct fbnic_fw_mbx *tx_mbx;
	u8 tail;

	/* Record current state of the Tx mailbox */
	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];

	spin_lock_irq(&fbd->fw_tx_lock);

	/* Clear ready to prevent any further attempts to transmit */
	tx_mbx->ready = false;

	/* Read tail to determine the last tail state for the ring */
	tail = tx_mbx->tail;

	spin_unlock_irq(&fbd->fw_tx_lock);

	/* Give the firmware time to process the outstanding messages;
	 * we will wait up to 10 seconds, which is 500 waits of 20ms.
	 */
	do {
		u8 head = tx_mbx->head;

		/* Tx ring is empty once head == tail */
		if (head == tail)
			break;

		msleep(20);
		fbnic_mbx_process_tx_msgs(fbd);
	} while (time_is_after_jiffies(timeout));
}

void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
				 const size_t str_sz)
{
	struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
	const char *delim = "";

	if (mgmt->commit[0])
		delim = "_";

	fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
				 fw_version, str_sz);
}