// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <net/devlink.h>
#include <asm/barrier.h>

#include "hinic_devlink.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_api_cmd.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw_dev.h"

#define SYNC_MSG_ID_MASK		0x1FF

#define SYNC_MSG_ID(pf_to_mgmt)		((pf_to_mgmt)->sync_msg_id)

#define SYNC_MSG_ID_INC(pf_to_mgmt)	(SYNC_MSG_ID(pf_to_mgmt) = \
					((SYNC_MSG_ID(pf_to_mgmt) + 1) & \
					 SYNC_MSG_ID_MASK))
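
/*
 * Illustrative note: sync message ids occupy 9 bits, so they cycle through
 * 0..511.  For example, with sync_msg_id == 0x1FF, SYNC_MSG_ID_INC() wraps
 * the counter back to 0.
 */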

#define MSG_SZ_IS_VALID(in_size)	((in_size) <= MAX_MSG_LEN)

#define MGMT_MSG_LEN_MIN		20
#define MGMT_MSG_LEN_STEP		16
#define MGMT_MSG_RSVD_FOR_DEV		8

#define SEGMENT_LEN			48

#define MAX_PF_MGMT_BUF_SIZE		2048

/* Data should be SEG LEN size aligned */
#define MAX_MSG_LEN			2016

#define MSG_NOT_RESP			0xFFFF

#define MGMT_MSG_TIMEOUT		5000

#define SET_FUNC_PORT_MBOX_TIMEOUT	30000

#define SET_FUNC_PORT_MGMT_TIMEOUT	25000

#define UPDATE_FW_MGMT_TIMEOUT		20000

#define mgmt_to_pfhwdev(pf_mgmt)	\
		container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)

enum msg_segment_type {
	NOT_LAST_SEGMENT = 0,
	LAST_SEGMENT     = 1,
};

enum mgmt_direction_type {
	MGMT_DIRECT_SEND = 0,
	MGMT_RESP        = 1,
};

enum msg_ack_type {
	MSG_ACK          = 0,
	MSG_NO_ACK       = 1,
};

/**
 * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip whose messages this handler will handle
 * @handle: private data for the callback
 * @callback: the handler that will handle messages
 **/
void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
				enum hinic_mod_type mod,
				void *handle,
				void (*callback)(void *handle,
						 u8 cmd, void *buf_in,
						 u16 in_size, void *buf_out,
						 u16 *out_size))
{
	struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];

	mgmt_cb->cb = callback;
	mgmt_cb->handle = handle;
	mgmt_cb->state = HINIC_MGMT_CB_ENABLED;
}
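
/*
 * Usage sketch (illustrative only, not part of the driver): a handler is
 * registered once per chip module; the handler name below is hypothetical
 * and HINIC_MOD_COMM is just an example module.  The callback decodes
 * cmd/buf_in and, when the MGMT CPU expects a reply, fills buf_out and
 * sets *out_size.
 *
 *	static void my_comm_msg_handler(void *handle, u8 cmd, void *buf_in,
 *					u16 in_size, void *buf_out,
 *					u16 *out_size)
 *	{
 *		struct hinic_hwdev *hwdev = handle;
 *
 *		dev_dbg(&hwdev->hwif->pdev->dev, "mgmt cmd %u\n", cmd);
 *		*out_size = 0;
 *	}
 *
 *	hinic_register_mgmt_msg_cb(pf_to_mgmt, HINIC_MOD_COMM, hwdev,
 *				   my_comm_msg_handler);
 */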

/**
 * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip whose messages this handler handles
 **/
void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
				  enum hinic_mod_type mod)
{
	struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];

	mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED;

	while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING)
		schedule();

	mgmt_cb->cb = NULL;
}

/**
 * prepare_header - prepare the header of the message
 * @pf_to_mgmt: PF to MGMT channel
 * @msg_len: the length of the message
 * @mod: module in the chip that will get the message
 * @ack_type: ask for response
 * @direction: the direction of the message
 * @cmd: command of the message
 * @msg_id: message id
 *
 * Return the prepared header value
 **/
static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt,
			  u16 msg_len, enum hinic_mod_type mod,
			  enum msg_ack_type ack_type,
			  enum mgmt_direction_type direction,
			  u16 cmd, u16 msg_id)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;

	return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
	       HINIC_MSG_HEADER_SET(mod, MODULE) |
	       HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) |
	       HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
	       HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
	       HINIC_MSG_HEADER_SET(0, SEQID) |
	       HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
	       HINIC_MSG_HEADER_SET(direction, DIRECTION) |
	       HINIC_MSG_HEADER_SET(cmd, CMD) |
	       HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) |
	       HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) |
	       HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
}
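
/*
 * Illustrative note: on the PF -> MGMT path the driver always emits the
 * message as a single segment, so prepare_header() fixes SEQID to 0, LAST
 * to LAST_SEGMENT and SEG_LEN to SEGMENT_LEN; multi-segment handling only
 * exists on the receive side (see recv_mgmt_msg_handler()).  A direct
 * synchronous send of a 16-byte payload would, for example, be built as
 *
 *	header = prepare_header(pf_to_mgmt, 16, HINIC_MOD_COMM, MSG_ACK,
 *				MGMT_DIRECT_SEND, cmd, msg_id);
 *
 * where HINIC_MOD_COMM and cmd stand in for the caller's module and
 * command.
 */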

/**
 * prepare_mgmt_cmd - prepare the mgmt command
 * @mgmt_cmd: pointer to the command to prepare
 * @header: pointer of the header for the message
 * @msg: the data of the message
 * @msg_len: the length of the message
 **/
static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len)
{
	memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);

	mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
	memcpy(mgmt_cmd, header, sizeof(*header));

	mgmt_cmd += sizeof(*header);
	memcpy(mgmt_cmd, msg, msg_len);
}
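
/*
 * Resulting buffer layout (offsets in bytes), as assembled above:
 *
 *	0               8               16
 *	+---------------+---------------+------------------------ ...
 *	| reserved for  | 64-bit msg    | message payload
 *	| the device    | header        | (msg_len bytes)
 *	+---------------+---------------+------------------------ ...
 */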

/**
 * mgmt_msg_len - calculate the total message length
 * @msg_data_len: the length of the message data
 *
 * Return the total message length
 **/
static u16 mgmt_msg_len(u16 msg_data_len)
{
	/* RSVD + HEADER_SIZE + DATA_LEN */
	u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len;

	if (msg_len > MGMT_MSG_LEN_MIN)
		msg_len = MGMT_MSG_LEN_MIN +
			  ALIGN((msg_len - MGMT_MSG_LEN_MIN),
				MGMT_MSG_LEN_STEP);
	else
		msg_len = MGMT_MSG_LEN_MIN;

	return msg_len;
}
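
/*
 * Worked example: msg_data_len = 40 gives 8 (rsvd) + 8 (header) + 40 = 56
 * bytes, which is rounded up to 20 + ALIGN(56 - 20, 16) = 20 + 48 = 68
 * bytes; payloads of up to 4 data bytes stay at the 20-byte minimum.
 */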

/**
 * send_msg_to_mgmt - send message to mgmt by API CMD
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @data: the msg data
 * @data_len: the msg data length
 * @ack_type: ask for response
 * @direction: the direction of the original message
 * @resp_msg_id: msg id to response for
 *
 * Return 0 - Success, negative - Failure
 **/
static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
			    enum hinic_mod_type mod, u8 cmd,
			    u8 *data, u16 data_len,
			    enum msg_ack_type ack_type,
			    enum mgmt_direction_type direction,
			    u16 resp_msg_id)
{
	struct hinic_api_cmd_chain *chain;
	u64 header;
	u16 msg_id;

	msg_id = SYNC_MSG_ID(pf_to_mgmt);

	if (direction == MGMT_RESP) {
		header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
					direction, cmd, resp_msg_id);
	} else {
		SYNC_MSG_ID_INC(pf_to_mgmt);
		header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
					direction, cmd, msg_id);
	}

	prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len);

	chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU];
	return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT,
				   pf_to_mgmt->sync_msg_buf,
				   mgmt_msg_len(data_len));
}
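
/*
 * Illustrative note: a MGMT_DIRECT_SEND transmits the sync message id that
 * was current on entry and then advances the counter for the next request,
 * e.g. a send with sync_msg_id == 5 goes out with id 5 and leaves the
 * counter at 6.  A MGMT_RESP reuses resp_msg_id of the original request so
 * the MGMT CPU can correlate the reply.
 */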

/**
 * msg_to_mgmt_sync - send sync message to mgmt
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @buf_in: the msg data
 * @in_size: the msg data length
 * @buf_out: response
 * @out_size: response length
 * @direction: the direction of the original message
 * @resp_msg_id: msg id to response for
 * @timeout: time-out period of waiting for response
 *
 * Return 0 - Success, negative - Failure
 **/
static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
			    enum hinic_mod_type mod, u8 cmd,
			    u8 *buf_in, u16 in_size,
			    u8 *buf_out, u16 *out_size,
			    enum mgmt_direction_type direction,
			    u16 resp_msg_id, u32 timeout)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_recv_msg *recv_msg;
	struct completion *recv_done;
	unsigned long timeo;
	u16 msg_id;
	int err;

	/* Lock the sync_msg_buf */
	down(&pf_to_mgmt->sync_msg_lock);

	recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
	recv_done = &recv_msg->recv_done;

	if (resp_msg_id == MSG_NOT_RESP)
		msg_id = SYNC_MSG_ID(pf_to_mgmt);
	else
		msg_id = resp_msg_id;

	init_completion(recv_done);

	err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
			       MSG_ACK, direction, resp_msg_id);
	if (err) {
		dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n");
		goto unlock_sync_msg;
	}

	timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);

	if (!wait_for_completion_timeout(recv_done, timeo)) {
		dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
		hinic_dump_aeq_info(pf_to_mgmt->hwdev);
		err = -ETIMEDOUT;
		goto unlock_sync_msg;
	}

	smp_rmb(); /* verify reading after completion */

	if (recv_msg->msg_id != msg_id) {
		dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id);
		err = -EFAULT;
		goto unlock_sync_msg;
	}

	if ((buf_out) && (recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE)) {
		memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
		*out_size = recv_msg->msg_len;
	}

unlock_sync_msg:
	up(&pf_to_mgmt->sync_msg_lock);
	return err;
}
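
/*
 * Flow sketch: the caller holds sync_msg_lock across the whole exchange,
 * sends the request through the API CMD chain, and sleeps on recv_done.
 * When the response arrives, the AEQ handler reassembles it into
 * recv_resp_msg_from_mgmt and calls complete(); the waiter then verifies
 * msg_id and copies at most MAX_PF_MGMT_BUF_SIZE bytes into buf_out.
 */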

/**
 * msg_to_mgmt_async - send message to mgmt without response
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @buf_in: the msg data
 * @in_size: the msg data length
 * @direction: the direction of the original message
 * @resp_msg_id: msg id to response for
 *
 * Return 0 - Success, negative - Failure
 **/
static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt,
			     enum hinic_mod_type mod, u8 cmd,
			     u8 *buf_in, u16 in_size,
			     enum mgmt_direction_type direction,
			     u16 resp_msg_id)
{
	int err;

	/* Lock the sync_msg_buf */
	down(&pf_to_mgmt->sync_msg_lock);

	err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
			       MSG_NO_ACK, direction, resp_msg_id);

	up(&pf_to_mgmt->sync_msg_lock);
	return err;
}

/**
 * hinic_msg_to_mgmt - send message to mgmt
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @buf_in: the msg data
 * @in_size: the msg data length
 * @buf_out: response
 * @out_size: returned response length
 * @sync: sync msg or async msg
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
		      enum hinic_mod_type mod, u8 cmd,
		      void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
		      enum hinic_mgmt_msg_type sync)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
	struct pci_dev *pdev = hwif->pdev;
	u32 timeout = 0;

	if (sync != HINIC_MGMT_MSG_SYNC) {
		dev_err(&pdev->dev, "Invalid MGMT msg type\n");
		return -EINVAL;
	}

	if (!MSG_SZ_IS_VALID(in_size)) {
		dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n");
		return -EINVAL;
	}

	if (HINIC_IS_VF(hwif)) {
		if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
			timeout = SET_FUNC_PORT_MBOX_TIMEOUT;

		return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
					in_size, buf_out, out_size, timeout);
	} else {
		if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
			timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
		else if (cmd == HINIC_PORT_CMD_UPDATE_FW)
			timeout = UPDATE_FW_MGMT_TIMEOUT;

		return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
					buf_out, out_size, MGMT_DIRECT_SEND,
					MSG_NOT_RESP, timeout);
	}
}
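
/*
 * Caller sketch (illustrative; exact wrapper names in the driver may
 * differ): a PF sending an L2NIC port command and waiting synchronously
 * for the management CPU's reply would look roughly like
 *
 *	err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd,
 *				buf_in, in_size, buf_out, &out_size,
 *				HINIC_MGMT_MSG_SYNC);
 *
 * where cmd, buf_in, in_size, buf_out and out_size are placeholders for
 * the caller's command and buffers.
 */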

/**
 * recv_mgmt_msg_work_handler - deferred handler for a message from mgmt cpu
 * @work: work entry embedded in the queued hinic_mgmt_msg_handle_work
 *
 * Runs the registered module callback and, if the MGMT CPU sent a
 * synchronous message, sends the callback's output back as the response.
 **/
static void recv_mgmt_msg_work_handler(struct work_struct *work)
{
	struct hinic_mgmt_msg_handle_work *mgmt_work =
		container_of(work, struct hinic_mgmt_msg_handle_work, work);
	struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt;
	struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
	u8 *buf_out = pf_to_mgmt->mgmt_ack_buf;
	struct hinic_mgmt_cb *mgmt_cb;
	unsigned long cb_state;
	u16 out_size = 0;

	memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);

	if (mgmt_work->mod >= HINIC_MOD_MAX) {
		dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
			mgmt_work->mod);
		kfree(mgmt_work->msg);
		kfree(mgmt_work);
		return;
	}

	mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod];

	cb_state = cmpxchg(&mgmt_cb->state,
			   HINIC_MGMT_CB_ENABLED,
			   HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);

	if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb))
		mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
			    mgmt_work->msg, mgmt_work->msg_len,
			    buf_out, &out_size);
	else
		dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
			mgmt_work->mod, mgmt_work->cmd);

	mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;

	if (!mgmt_work->async_mgmt_to_pf)
		/* MGMT sent sync msg, send the response */
		msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd,
				  buf_out, out_size, MGMT_RESP,
				  mgmt_work->msg_id);

	kfree(mgmt_work->msg);
	kfree(mgmt_work);
}

/**
 * mgmt_recv_msg_handler - handler for message from mgmt cpu
 * @pf_to_mgmt: PF to MGMT channel
 * @recv_msg: received message details
 **/
static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
				  struct hinic_recv_msg *recv_msg)
{
	struct hinic_mgmt_msg_handle_work *mgmt_work = NULL;
	struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;

	mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
	if (!mgmt_work) {
		dev_err(&pdev->dev, "Allocate mgmt work memory failed\n");
		return;
	}

	if (recv_msg->msg_len) {
		mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
		if (!mgmt_work->msg) {
			dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n");
			kfree(mgmt_work);
			return;
		}
	}

	mgmt_work->pf_to_mgmt = pf_to_mgmt;
	mgmt_work->msg_len = recv_msg->msg_len;
	memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
	mgmt_work->msg_id = recv_msg->msg_id;
	mgmt_work->mod = recv_msg->mod;
	mgmt_work->cmd = recv_msg->cmd;
	mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;

	INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
	queue_work(pf_to_mgmt->workq, &mgmt_work->work);
}

/**
 * mgmt_resp_msg_handler - handler for a response message from mgmt cpu
 * @pf_to_mgmt: PF to MGMT channel
 * @recv_msg: received message details
 **/
static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
				  struct hinic_recv_msg *recv_msg)
{
	wmb(); /* verify writing all, before reading */

	complete(&recv_msg->recv_done);
}

/**
 * recv_mgmt_msg_handler - handler for a message from mgmt cpu
 * @pf_to_mgmt: PF to MGMT channel
 * @header: the header of the message
 * @recv_msg: received message details
 **/
static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
				  u64 *header, struct hinic_recv_msg *recv_msg)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int seq_id, seg_len;
	u8 *msg_body;

	seq_id = HINIC_MSG_HEADER_GET(*header, SEQID);
	seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN);

	if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) {
		dev_err(&pdev->dev, "recv big mgmt msg\n");
		return;
	}

	msg_body = (u8 *)header + sizeof(*header);
	memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len);

	if (!HINIC_MSG_HEADER_GET(*header, LAST))
		return;

	recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD);
	recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE);
	recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header,
							  ASYNC_MGMT_TO_PF);
	recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN);
	recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID);

	if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP)
		mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
	else
		mgmt_recv_msg_handler(pf_to_mgmt, recv_msg);
}
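
/*
 * Reassembly example: a 100-byte message from the MGMT CPU arrives as three
 * AEQ elements carrying SEQID 0, 1 and 2 with SEG_LEN 48, 48 and 4.  The
 * first two copies land at offsets 0 and 48 of recv_msg->msg and return
 * early; the third has LAST set, so the header fields are latched and the
 * message is dispatched as either a response or a new request.
 */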

/**
 * mgmt_msg_aeqe_handler - handler for a mgmt message event
 * @handle: PF to MGMT channel
 * @data: the header of the message
 * @size: unused
 **/
static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size)
{
	struct hinic_pf_to_mgmt *pf_to_mgmt = handle;
	struct hinic_recv_msg *recv_msg;
	u64 *header = (u64 *)data;

	recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) ==
		   MGMT_DIRECT_SEND ?
		   &pf_to_mgmt->recv_msg_from_mgmt :
		   &pf_to_mgmt->recv_resp_msg_from_mgmt;

	recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
}

/**
 * alloc_recv_msg - allocate receive message memory
 * @pf_to_mgmt: PF to MGMT channel
 * @recv_msg: pointer that will hold the allocated data
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt,
			  struct hinic_recv_msg *recv_msg)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
	struct pci_dev *pdev = hwif->pdev;

	recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
				     GFP_KERNEL);
	if (!recv_msg->msg)
		return -ENOMEM;

	recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
					 GFP_KERNEL);
	if (!recv_msg->buf_out)
		return -ENOMEM;

	return 0;
}

/**
 * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
 * @pf_to_mgmt: PF to MGMT channel
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int err;

	err = alloc_recv_msg(pf_to_mgmt,
			     &pf_to_mgmt->recv_msg_from_mgmt);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate recv msg\n");
		return err;
	}

	err = alloc_recv_msg(pf_to_mgmt,
			     &pf_to_mgmt->recv_resp_msg_from_mgmt);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate resp recv msg\n");
		return err;
	}

	pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev,
						MAX_PF_MGMT_BUF_SIZE,
						GFP_KERNEL);
	if (!pf_to_mgmt->sync_msg_buf)
		return -ENOMEM;

	pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev,
						MAX_PF_MGMT_BUF_SIZE,
						GFP_KERNEL);
	if (!pf_to_mgmt->mgmt_ack_buf)
		return -ENOMEM;

	return 0;
}

/**
 * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
 * @pf_to_mgmt: PF to MGMT channel
 * @hwif: HW interface the PF to MGMT will use for accessing HW
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
			  struct hinic_hwif *hwif)
{
	struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
	struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
	struct pci_dev *pdev = hwif->pdev;
	int err;

	pf_to_mgmt->hwif = hwif;
	pf_to_mgmt->hwdev = hwdev;

	if (HINIC_IS_VF(hwif))
		return 0;

	err = hinic_health_reporters_create(hwdev->devlink_dev);
	if (err)
		return err;

	sema_init(&pf_to_mgmt->sync_msg_lock, 1);
	pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt");
	if (!pf_to_mgmt->workq) {
		dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n");
		hinic_health_reporters_destroy(hwdev->devlink_dev);
		return -ENOMEM;
	}
	pf_to_mgmt->sync_msg_id = 0;

	err = alloc_msg_buf(pf_to_mgmt);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
		destroy_workqueue(pf_to_mgmt->workq);
		hinic_health_reporters_destroy(hwdev->devlink_dev);
		return err;
	}

	err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
		destroy_workqueue(pf_to_mgmt->workq);
		hinic_health_reporters_destroy(hwdev->devlink_dev);
		return err;
	}

	hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU,
				 pf_to_mgmt,
				 mgmt_msg_aeqe_handler);
	return 0;
}
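
/*
 * Lifecycle sketch (illustrative): the PF hwdev setup path pairs these two
 * calls, roughly
 *
 *	err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwdev->hwif);
 *	if (err)
 *		return err;
 *	...
 *	hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
 *
 * On a VF both calls return immediately, since VFs reach the PF over the
 * mailbox instead of the API CMD channel (see hinic_msg_to_mgmt()).
 */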

/**
 * hinic_pf_to_mgmt_free - free PF to MGMT channel
 * @pf_to_mgmt: PF to MGMT channel
 **/
void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
{
	struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
	struct hinic_hwdev *hwdev = &pfhwdev->hwdev;

	if (HINIC_IS_VF(hwdev->hwif))
		return;

	hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
	hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
	destroy_workqueue(pf_to_mgmt->workq);
	hinic_health_reporters_destroy(hwdev->devlink_dev);
}