1 /*
2 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/pci.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/slab.h>
39 #include <linux/delay.h>
40 #include <linux/random.h>
41 #include <linux/io-mapping.h>
42 #include <linux/mlx5/driver.h>
43 #include <linux/mlx5/eq.h>
44 #include <linux/debugfs.h>
45
46 #include "mlx5_core.h"
47 #include "lib/eq.h"
48
49 enum {
50 CMD_IF_REV = 5,
51 };
52
53 enum {
54 CMD_MODE_POLLING,
55 CMD_MODE_EVENTS
56 };
57
58 enum {
59 MLX5_CMD_DELIVERY_STAT_OK = 0x0,
60 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
61 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
62 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
63 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
64 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
65 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
66 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
67 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
68 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
69 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
70 };
71
72 static struct mlx5_cmd_work_ent *
73 cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
74 struct mlx5_cmd_msg *out, void *uout, int uout_size,
75 mlx5_cmd_cbk_t cbk, void *context, int page_queue)
76 {
77 gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
78 struct mlx5_cmd_work_ent *ent;
79
80 ent = kzalloc(sizeof(*ent), alloc_flags);
81 if (!ent)
82 return ERR_PTR(-ENOMEM);
83
84 ent->idx = -EINVAL;
85 ent->in = in;
86 ent->out = out;
87 ent->uout = uout;
88 ent->uout_size = uout_size;
89 ent->callback = cbk;
90 ent->context = context;
91 ent->cmd = cmd;
92 ent->page_queue = page_queue;
93 refcount_set(&ent->refcnt, 1);
94
95 return ent;
96 }
97
98 static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
99 {
100 kfree(ent);
101 }
102
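/* Added note (not in the original source): tokens run from 1 to 255 and wrap,
 * skipping zero.  The token is copied into the command layout and into every
 * mailbox block so completions and mailbox chains can be matched against the
 * issuing command.
 */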
103 static u8 alloc_token(struct mlx5_cmd *cmd)
104 {
105 u8 token;
106
107 spin_lock(&cmd->token_lock);
108 cmd->token++;
109 if (cmd->token == 0)
110 cmd->token++;
111 token = cmd->token;
112 spin_unlock(&cmd->token_lock);
113
114 return token;
115 }
116
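/* Added note (not in the original source): reserve a free slot in the command
 * queue bitmap; returns -ENOMEM if all cmd->max_reg_cmds regular slots are in
 * use.
 */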
117 static int cmd_alloc_index(struct mlx5_cmd *cmd)
118 {
119 unsigned long flags;
120 int ret;
121
122 spin_lock_irqsave(&cmd->alloc_lock, flags);
123 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
124 if (ret < cmd->max_reg_cmds)
125 clear_bit(ret, &cmd->bitmask);
126 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
127
128 return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
129 }
130
131 static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
132 {
133 unsigned long flags;
134
135 spin_lock_irqsave(&cmd->alloc_lock, flags);
136 set_bit(idx, &cmd->bitmask);
137 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
138 }
139
140 static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
141 {
142 refcount_inc(&ent->refcnt);
143 }
144
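/* Added note (not in the original source): drop a reference on the entry.
 * The last put releases the command slot (if one was assigned), signals the
 * corresponding semaphore and frees the entry itself.
 */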
145 static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
146 {
147 if (!refcount_dec_and_test(&ent->refcnt))
148 return;
149
150 if (ent->idx >= 0) {
151 struct mlx5_cmd *cmd = ent->cmd;
152
153 cmd_free_index(cmd, ent->idx);
154 up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
155 }
156
157 cmd_free_ent(ent);
158 }
159
160 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
161 {
162 return cmd->cmd_buf + (idx << cmd->log_stride);
163 }
164
165 static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
166 {
167 int size = msg->len;
168 int blen = size - min_t(int, sizeof(msg->first.data), size);
169
170 return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
171 }
172
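/* Added note (not in the original source): 8-bit XOR checksum over
 * buf[offset, offset + len), used for the command layout and mailbox block
 * signatures below.
 */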
173 static u8 xor8_buf(void *buf, size_t offset, int len)
174 {
175 u8 *ptr = buf;
176 u8 sum = 0;
177 int i;
178 int end = len + offset;
179
180 for (i = offset; i < end; i++)
181 sum ^= ptr[i];
182
183 return sum;
184 }
185
186 static int verify_block_sig(struct mlx5_cmd_prot_block *block)
187 {
188 size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
189 int xor_len = sizeof(*block) - sizeof(block->data) - 1;
190
191 if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
192 return -EINVAL;
193
194 if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
195 return -EINVAL;
196
197 return 0;
198 }
199
200 static void calc_block_sig(struct mlx5_cmd_prot_block *block)
201 {
202 int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
203 size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
204
205 block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
206 block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
207 }
208
209 static void calc_chain_sig(struct mlx5_cmd_msg *msg)
210 {
211 struct mlx5_cmd_mailbox *next = msg->next;
212 int n = mlx5_calc_cmd_blocks(msg);
213 int i = 0;
214
215 for (i = 0; i < n && next; i++) {
216 calc_block_sig(next->buf);
217 next = next->next;
218 }
219 }
220
221 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
222 {
223 ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
224 if (csum) {
225 calc_chain_sig(ent->in);
226 calc_chain_sig(ent->out);
227 }
228 }
229
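/* Added note (not in the original source): busy-wait (with cond_resched)
 * until firmware clears the HW ownership bit, allowing one extra second
 * beyond the nominal command timeout.
 */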
230 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
231 {
232 unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
233 u8 own;
234
235 do {
236 own = READ_ONCE(ent->lay->status_own);
237 if (!(own & CMD_OWNER_HW)) {
238 ent->ret = 0;
239 return;
240 }
241 cond_resched();
242 } while (time_before(jiffies, poll_end));
243
244 ent->ret = -ETIMEDOUT;
245 }
246
247 static int verify_signature(struct mlx5_cmd_work_ent *ent)
248 {
249 struct mlx5_cmd_mailbox *next = ent->out->next;
250 int n = mlx5_calc_cmd_blocks(ent->out);
251 int err;
252 u8 sig;
253 int i = 0;
254
255 sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
256 if (sig != 0xff)
257 return -EINVAL;
258
259 for (i = 0; i < n && next; i++) {
260 err = verify_block_sig(next->buf);
261 if (err)
262 return err;
263
264 next = next->next;
265 }
266
267 return 0;
268 }
269
270 static void dump_buf(void *buf, int size, int data_only, int offset)
271 {
272 __be32 *p = buf;
273 int i;
274
275 for (i = 0; i < size; i += 16) {
276 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
277 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
278 be32_to_cpu(p[3]));
279 p += 4;
280 offset += 16;
281 }
282 if (!data_only)
283 pr_debug("\n");
284 }
285
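/* Added note (not in the original source): when the device is in internal
 * error (or the opcode is not currently allowed), commands are not sent to
 * firmware.  Teardown/destroy-style opcodes are reported as success so
 * cleanup flows can make progress; everything else fails with a
 * driver-generated syndrome and -EIO.
 */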
286 static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
287 u32 *synd, u8 *status)
288 {
289 *synd = 0;
290 *status = 0;
291
292 switch (op) {
293 case MLX5_CMD_OP_TEARDOWN_HCA:
294 case MLX5_CMD_OP_DISABLE_HCA:
295 case MLX5_CMD_OP_MANAGE_PAGES:
296 case MLX5_CMD_OP_DESTROY_MKEY:
297 case MLX5_CMD_OP_DESTROY_EQ:
298 case MLX5_CMD_OP_DESTROY_CQ:
299 case MLX5_CMD_OP_DESTROY_QP:
300 case MLX5_CMD_OP_DESTROY_PSV:
301 case MLX5_CMD_OP_DESTROY_SRQ:
302 case MLX5_CMD_OP_DESTROY_XRC_SRQ:
303 case MLX5_CMD_OP_DESTROY_XRQ:
304 case MLX5_CMD_OP_DESTROY_DCT:
305 case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
306 case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
307 case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
308 case MLX5_CMD_OP_DEALLOC_PD:
309 case MLX5_CMD_OP_DEALLOC_UAR:
310 case MLX5_CMD_OP_DETACH_FROM_MCG:
311 case MLX5_CMD_OP_DEALLOC_XRCD:
312 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
313 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
314 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
315 case MLX5_CMD_OP_DESTROY_LAG:
316 case MLX5_CMD_OP_DESTROY_VPORT_LAG:
317 case MLX5_CMD_OP_DESTROY_TIR:
318 case MLX5_CMD_OP_DESTROY_SQ:
319 case MLX5_CMD_OP_DESTROY_RQ:
320 case MLX5_CMD_OP_DESTROY_RMP:
321 case MLX5_CMD_OP_DESTROY_TIS:
322 case MLX5_CMD_OP_DESTROY_RQT:
323 case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
324 case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
325 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
326 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
327 case MLX5_CMD_OP_2ERR_QP:
328 case MLX5_CMD_OP_2RST_QP:
329 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
330 case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
331 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
332 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
333 case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
334 case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
335 case MLX5_CMD_OP_FPGA_DESTROY_QP:
336 case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
337 case MLX5_CMD_OP_DEALLOC_MEMIC:
338 case MLX5_CMD_OP_PAGE_FAULT_RESUME:
339 case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
340 return MLX5_CMD_STAT_OK;
341
342 case MLX5_CMD_OP_QUERY_HCA_CAP:
343 case MLX5_CMD_OP_QUERY_ADAPTER:
344 case MLX5_CMD_OP_INIT_HCA:
345 case MLX5_CMD_OP_ENABLE_HCA:
346 case MLX5_CMD_OP_QUERY_PAGES:
347 case MLX5_CMD_OP_SET_HCA_CAP:
348 case MLX5_CMD_OP_QUERY_ISSI:
349 case MLX5_CMD_OP_SET_ISSI:
350 case MLX5_CMD_OP_CREATE_MKEY:
351 case MLX5_CMD_OP_QUERY_MKEY:
352 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
353 case MLX5_CMD_OP_CREATE_EQ:
354 case MLX5_CMD_OP_QUERY_EQ:
355 case MLX5_CMD_OP_GEN_EQE:
356 case MLX5_CMD_OP_CREATE_CQ:
357 case MLX5_CMD_OP_QUERY_CQ:
358 case MLX5_CMD_OP_MODIFY_CQ:
359 case MLX5_CMD_OP_CREATE_QP:
360 case MLX5_CMD_OP_RST2INIT_QP:
361 case MLX5_CMD_OP_INIT2RTR_QP:
362 case MLX5_CMD_OP_RTR2RTS_QP:
363 case MLX5_CMD_OP_RTS2RTS_QP:
364 case MLX5_CMD_OP_SQERR2RTS_QP:
365 case MLX5_CMD_OP_QUERY_QP:
366 case MLX5_CMD_OP_SQD_RTS_QP:
367 case MLX5_CMD_OP_INIT2INIT_QP:
368 case MLX5_CMD_OP_CREATE_PSV:
369 case MLX5_CMD_OP_CREATE_SRQ:
370 case MLX5_CMD_OP_QUERY_SRQ:
371 case MLX5_CMD_OP_ARM_RQ:
372 case MLX5_CMD_OP_CREATE_XRC_SRQ:
373 case MLX5_CMD_OP_QUERY_XRC_SRQ:
374 case MLX5_CMD_OP_ARM_XRC_SRQ:
375 case MLX5_CMD_OP_CREATE_XRQ:
376 case MLX5_CMD_OP_QUERY_XRQ:
377 case MLX5_CMD_OP_ARM_XRQ:
378 case MLX5_CMD_OP_CREATE_DCT:
379 case MLX5_CMD_OP_DRAIN_DCT:
380 case MLX5_CMD_OP_QUERY_DCT:
381 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
382 case MLX5_CMD_OP_QUERY_VPORT_STATE:
383 case MLX5_CMD_OP_MODIFY_VPORT_STATE:
384 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
385 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
386 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
387 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
388 case MLX5_CMD_OP_SET_ROCE_ADDRESS:
389 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
390 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
391 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
392 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
393 case MLX5_CMD_OP_QUERY_VNIC_ENV:
394 case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
395 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
396 case MLX5_CMD_OP_QUERY_Q_COUNTER:
397 case MLX5_CMD_OP_SET_MONITOR_COUNTER:
398 case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
399 case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
400 case MLX5_CMD_OP_QUERY_RATE_LIMIT:
401 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
402 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
403 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
404 case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
405 case MLX5_CMD_OP_ALLOC_PD:
406 case MLX5_CMD_OP_ALLOC_UAR:
407 case MLX5_CMD_OP_CONFIG_INT_MODERATION:
408 case MLX5_CMD_OP_ACCESS_REG:
409 case MLX5_CMD_OP_ATTACH_TO_MCG:
410 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
411 case MLX5_CMD_OP_MAD_IFC:
412 case MLX5_CMD_OP_QUERY_MAD_DEMUX:
413 case MLX5_CMD_OP_SET_MAD_DEMUX:
414 case MLX5_CMD_OP_NOP:
415 case MLX5_CMD_OP_ALLOC_XRCD:
416 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
417 case MLX5_CMD_OP_QUERY_CONG_STATUS:
418 case MLX5_CMD_OP_MODIFY_CONG_STATUS:
419 case MLX5_CMD_OP_QUERY_CONG_PARAMS:
420 case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
421 case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
422 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
423 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
424 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
425 case MLX5_CMD_OP_CREATE_LAG:
426 case MLX5_CMD_OP_MODIFY_LAG:
427 case MLX5_CMD_OP_QUERY_LAG:
428 case MLX5_CMD_OP_CREATE_VPORT_LAG:
429 case MLX5_CMD_OP_CREATE_TIR:
430 case MLX5_CMD_OP_MODIFY_TIR:
431 case MLX5_CMD_OP_QUERY_TIR:
432 case MLX5_CMD_OP_CREATE_SQ:
433 case MLX5_CMD_OP_MODIFY_SQ:
434 case MLX5_CMD_OP_QUERY_SQ:
435 case MLX5_CMD_OP_CREATE_RQ:
436 case MLX5_CMD_OP_MODIFY_RQ:
437 case MLX5_CMD_OP_QUERY_RQ:
438 case MLX5_CMD_OP_CREATE_RMP:
439 case MLX5_CMD_OP_MODIFY_RMP:
440 case MLX5_CMD_OP_QUERY_RMP:
441 case MLX5_CMD_OP_CREATE_TIS:
442 case MLX5_CMD_OP_MODIFY_TIS:
443 case MLX5_CMD_OP_QUERY_TIS:
444 case MLX5_CMD_OP_CREATE_RQT:
445 case MLX5_CMD_OP_MODIFY_RQT:
446 case MLX5_CMD_OP_QUERY_RQT:
447
448 case MLX5_CMD_OP_CREATE_FLOW_TABLE:
449 case MLX5_CMD_OP_QUERY_FLOW_TABLE:
450 case MLX5_CMD_OP_CREATE_FLOW_GROUP:
451 case MLX5_CMD_OP_QUERY_FLOW_GROUP:
452 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
453 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
454 case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
455 case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
456 case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
457 case MLX5_CMD_OP_FPGA_CREATE_QP:
458 case MLX5_CMD_OP_FPGA_MODIFY_QP:
459 case MLX5_CMD_OP_FPGA_QUERY_QP:
460 case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
461 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
462 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
463 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
464 case MLX5_CMD_OP_CREATE_UCTX:
465 case MLX5_CMD_OP_DESTROY_UCTX:
466 case MLX5_CMD_OP_CREATE_UMEM:
467 case MLX5_CMD_OP_DESTROY_UMEM:
468 case MLX5_CMD_OP_ALLOC_MEMIC:
469 case MLX5_CMD_OP_MODIFY_XRQ:
470 case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
471 *status = MLX5_DRIVER_STATUS_ABORTED;
472 *synd = MLX5_DRIVER_SYND;
473 return -EIO;
474 default:
475 mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
476 return -EINVAL;
477 }
478 }
479
480 const char *mlx5_command_str(int command)
481 {
482 #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd
483
484 switch (command) {
485 MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
486 MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
487 MLX5_COMMAND_STR_CASE(INIT_HCA);
488 MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
489 MLX5_COMMAND_STR_CASE(ENABLE_HCA);
490 MLX5_COMMAND_STR_CASE(DISABLE_HCA);
491 MLX5_COMMAND_STR_CASE(QUERY_PAGES);
492 MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
493 MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
494 MLX5_COMMAND_STR_CASE(QUERY_ISSI);
495 MLX5_COMMAND_STR_CASE(SET_ISSI);
496 MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
497 MLX5_COMMAND_STR_CASE(CREATE_MKEY);
498 MLX5_COMMAND_STR_CASE(QUERY_MKEY);
499 MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
500 MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
501 MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
502 MLX5_COMMAND_STR_CASE(CREATE_EQ);
503 MLX5_COMMAND_STR_CASE(DESTROY_EQ);
504 MLX5_COMMAND_STR_CASE(QUERY_EQ);
505 MLX5_COMMAND_STR_CASE(GEN_EQE);
506 MLX5_COMMAND_STR_CASE(CREATE_CQ);
507 MLX5_COMMAND_STR_CASE(DESTROY_CQ);
508 MLX5_COMMAND_STR_CASE(QUERY_CQ);
509 MLX5_COMMAND_STR_CASE(MODIFY_CQ);
510 MLX5_COMMAND_STR_CASE(CREATE_QP);
511 MLX5_COMMAND_STR_CASE(DESTROY_QP);
512 MLX5_COMMAND_STR_CASE(RST2INIT_QP);
513 MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
514 MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
515 MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
516 MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
517 MLX5_COMMAND_STR_CASE(2ERR_QP);
518 MLX5_COMMAND_STR_CASE(2RST_QP);
519 MLX5_COMMAND_STR_CASE(QUERY_QP);
520 MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
521 MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
522 MLX5_COMMAND_STR_CASE(CREATE_PSV);
523 MLX5_COMMAND_STR_CASE(DESTROY_PSV);
524 MLX5_COMMAND_STR_CASE(CREATE_SRQ);
525 MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
526 MLX5_COMMAND_STR_CASE(QUERY_SRQ);
527 MLX5_COMMAND_STR_CASE(ARM_RQ);
528 MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
529 MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
530 MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
531 MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
532 MLX5_COMMAND_STR_CASE(CREATE_DCT);
533 MLX5_COMMAND_STR_CASE(DESTROY_DCT);
534 MLX5_COMMAND_STR_CASE(DRAIN_DCT);
535 MLX5_COMMAND_STR_CASE(QUERY_DCT);
536 MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
537 MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
538 MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
539 MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
540 MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
541 MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
542 MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
543 MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
544 MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
545 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
546 MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
547 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
548 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
549 MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
550 MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
551 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
552 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
553 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
554 MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
555 MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
556 MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
557 MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
558 MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
559 MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
560 MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
561 MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
562 MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
563 MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
564 MLX5_COMMAND_STR_CASE(ALLOC_PD);
565 MLX5_COMMAND_STR_CASE(DEALLOC_PD);
566 MLX5_COMMAND_STR_CASE(ALLOC_UAR);
567 MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
568 MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
569 MLX5_COMMAND_STR_CASE(ACCESS_REG);
570 MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
571 MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
572 MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
573 MLX5_COMMAND_STR_CASE(MAD_IFC);
574 MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
575 MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
576 MLX5_COMMAND_STR_CASE(NOP);
577 MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
578 MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
579 MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
580 MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
581 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
582 MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
583 MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
584 MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
585 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
586 MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
587 MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
588 MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
589 MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
590 MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
591 MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
592 MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
593 MLX5_COMMAND_STR_CASE(CREATE_LAG);
594 MLX5_COMMAND_STR_CASE(MODIFY_LAG);
595 MLX5_COMMAND_STR_CASE(QUERY_LAG);
596 MLX5_COMMAND_STR_CASE(DESTROY_LAG);
597 MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
598 MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
599 MLX5_COMMAND_STR_CASE(CREATE_TIR);
600 MLX5_COMMAND_STR_CASE(MODIFY_TIR);
601 MLX5_COMMAND_STR_CASE(DESTROY_TIR);
602 MLX5_COMMAND_STR_CASE(QUERY_TIR);
603 MLX5_COMMAND_STR_CASE(CREATE_SQ);
604 MLX5_COMMAND_STR_CASE(MODIFY_SQ);
605 MLX5_COMMAND_STR_CASE(DESTROY_SQ);
606 MLX5_COMMAND_STR_CASE(QUERY_SQ);
607 MLX5_COMMAND_STR_CASE(CREATE_RQ);
608 MLX5_COMMAND_STR_CASE(MODIFY_RQ);
609 MLX5_COMMAND_STR_CASE(DESTROY_RQ);
610 MLX5_COMMAND_STR_CASE(QUERY_RQ);
611 MLX5_COMMAND_STR_CASE(CREATE_RMP);
612 MLX5_COMMAND_STR_CASE(MODIFY_RMP);
613 MLX5_COMMAND_STR_CASE(DESTROY_RMP);
614 MLX5_COMMAND_STR_CASE(QUERY_RMP);
615 MLX5_COMMAND_STR_CASE(CREATE_TIS);
616 MLX5_COMMAND_STR_CASE(MODIFY_TIS);
617 MLX5_COMMAND_STR_CASE(DESTROY_TIS);
618 MLX5_COMMAND_STR_CASE(QUERY_TIS);
619 MLX5_COMMAND_STR_CASE(CREATE_RQT);
620 MLX5_COMMAND_STR_CASE(MODIFY_RQT);
621 MLX5_COMMAND_STR_CASE(DESTROY_RQT);
622 MLX5_COMMAND_STR_CASE(QUERY_RQT);
623 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
624 MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
625 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
626 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
627 MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
628 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
629 MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
630 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
631 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
632 MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
633 MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
634 MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
635 MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
636 MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
637 MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
638 MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
639 MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
640 MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
641 MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
642 MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
643 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
644 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
645 MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
646 MLX5_COMMAND_STR_CASE(CREATE_XRQ);
647 MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
648 MLX5_COMMAND_STR_CASE(QUERY_XRQ);
649 MLX5_COMMAND_STR_CASE(ARM_XRQ);
650 MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
651 MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
652 MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
653 MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
654 MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
655 MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
656 MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
657 MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
658 MLX5_COMMAND_STR_CASE(CREATE_UCTX);
659 MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
660 MLX5_COMMAND_STR_CASE(CREATE_UMEM);
661 MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
662 MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
663 MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
664 default: return "unknown command opcode";
665 }
666 }
667
668 static const char *cmd_status_str(u8 status)
669 {
670 switch (status) {
671 case MLX5_CMD_STAT_OK:
672 return "OK";
673 case MLX5_CMD_STAT_INT_ERR:
674 return "internal error";
675 case MLX5_CMD_STAT_BAD_OP_ERR:
676 return "bad operation";
677 case MLX5_CMD_STAT_BAD_PARAM_ERR:
678 return "bad parameter";
679 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
680 return "bad system state";
681 case MLX5_CMD_STAT_BAD_RES_ERR:
682 return "bad resource";
683 case MLX5_CMD_STAT_RES_BUSY:
684 return "resource busy";
685 case MLX5_CMD_STAT_LIM_ERR:
686 return "limits exceeded";
687 case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
688 return "bad resource state";
689 case MLX5_CMD_STAT_IX_ERR:
690 return "bad index";
691 case MLX5_CMD_STAT_NO_RES_ERR:
692 return "no resources";
693 case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
694 return "bad input length";
695 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
696 return "bad output length";
697 case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
698 return "bad QP state";
699 case MLX5_CMD_STAT_BAD_PKT_ERR:
700 return "bad packet (discarded)";
701 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
702 return "bad size too many outstanding CQEs";
703 default:
704 return "unknown status";
705 }
706 }
707
708 static int cmd_status_to_err(u8 status)
709 {
710 switch (status) {
711 case MLX5_CMD_STAT_OK: return 0;
712 case MLX5_CMD_STAT_INT_ERR: return -EIO;
713 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
714 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
715 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
716 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
717 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
718 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
719 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
720 case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
721 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
722 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
723 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
724 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
725 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
726 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
727 default: return -EIO;
728 }
729 }
730
731 struct mlx5_ifc_mbox_out_bits {
732 u8 status[0x8];
733 u8 reserved_at_8[0x18];
734
735 u8 syndrome[0x20];
736
737 u8 reserved_at_40[0x40];
738 };
739
740 struct mlx5_ifc_mbox_in_bits {
741 u8 opcode[0x10];
742 u8 uid[0x10];
743
744 u8 reserved_at_20[0x10];
745 u8 op_mod[0x10];
746
747 u8 reserved_at_40[0x40];
748 };
749
750 void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
751 {
752 *status = MLX5_GET(mbox_out, out, status);
753 *syndrome = MLX5_GET(mbox_out, out, syndrome);
754 }
755
756 static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
757 {
758 u32 syndrome;
759 u8 status;
760 u16 opcode;
761 u16 op_mod;
762 u16 uid;
763
764 mlx5_cmd_mbox_status(out, &status, &syndrome);
765 if (!status)
766 return 0;
767
768 opcode = MLX5_GET(mbox_in, in, opcode);
769 op_mod = MLX5_GET(mbox_in, in, op_mod);
770 uid = MLX5_GET(mbox_in, in, uid);
771
772 if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
773 mlx5_core_err_rl(dev,
774 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
775 mlx5_command_str(opcode), opcode, op_mod,
776 cmd_status_str(status), status, syndrome);
777 else
778 mlx5_core_dbg(dev,
779 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
780 mlx5_command_str(opcode),
781 opcode, op_mod,
782 cmd_status_str(status),
783 status,
784 syndrome);
785
786 return cmd_status_to_err(status);
787 }
788
789 static void dump_command(struct mlx5_core_dev *dev,
790 struct mlx5_cmd_work_ent *ent, int input)
791 {
792 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
793 u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
794 struct mlx5_cmd_mailbox *next = msg->next;
795 int n = mlx5_calc_cmd_blocks(msg);
796 int data_only;
797 u32 offset = 0;
798 int dump_len;
799 int i;
800
801 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
802
803 if (data_only)
804 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
805 "dump command data %s(0x%x) %s\n",
806 mlx5_command_str(op), op,
807 input ? "INPUT" : "OUTPUT");
808 else
809 mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
810 mlx5_command_str(op), op,
811 input ? "INPUT" : "OUTPUT");
812
813 if (data_only) {
814 if (input) {
815 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
816 offset += sizeof(ent->lay->in);
817 } else {
818 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
819 offset += sizeof(ent->lay->out);
820 }
821 } else {
822 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
823 offset += sizeof(*ent->lay);
824 }
825
826 for (i = 0; i < n && next; i++) {
827 if (data_only) {
828 dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
829 dump_buf(next->buf, dump_len, 1, offset);
830 offset += MLX5_CMD_DATA_BLOCK_SIZE;
831 } else {
832 mlx5_core_dbg(dev, "command block:\n");
833 dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
834 offset += sizeof(struct mlx5_cmd_prot_block);
835 }
836 next = next->next;
837 }
838
839 if (data_only)
840 pr_debug("\n");
841 }
842
843 static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
844 {
845 return MLX5_GET(mbox_in, in->first.data, opcode);
846 }
847
848 static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
849
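/* Added note (not in the original source): timeout work for commands issued
 * with a callback.  First try to recover a lost completion EQE; only if the
 * entry is still pending force a completion so the callback flow can finish.
 */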
850 static void cb_timeout_handler(struct work_struct *work)
851 {
852 struct delayed_work *dwork = container_of(work, struct delayed_work,
853 work);
854 struct mlx5_cmd_work_ent *ent = container_of(dwork,
855 struct mlx5_cmd_work_ent,
856 cb_timeout_work);
857 struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
858 cmd);
859
860 mlx5_cmd_eq_recover(dev);
861
862 /* Maybe it was handled by EQ recovery? */
863 if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
864 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
865 mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
866 goto out; /* phew, already handled */
867 }
868
869 ent->ret = -ETIMEDOUT;
870 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
871 ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
872 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
873
874 out:
875 cmd_ent_put(ent); /* matches the cmd_ent_get() taken when the delayed work was scheduled */
876 }
877
878 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
879 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
880 struct mlx5_cmd_msg *msg);
881
882 static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
883 {
884 if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
885 return true;
886
887 return cmd->allowed_opcode == opcode;
888 }
889
890 bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
891 {
892 return pci_channel_offline(dev->pdev) ||
893 dev->cmd.state != MLX5_CMDIF_STATE_UP ||
894 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
895 }
896
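/* Added note (not in the original source): runs on the command workqueue.
 * Takes a queue slot, builds the command layout, rings the doorbell (or fakes
 * a completion when the device is down) and, in polling mode, waits for the
 * HW ownership bit to clear before dispatching the completion handler.
 */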
897 static void cmd_work_handler(struct work_struct *work)
898 {
899 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
900 struct mlx5_cmd *cmd = ent->cmd;
901 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
902 unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
903 struct mlx5_cmd_layout *lay;
904 struct semaphore *sem;
905 unsigned long flags;
906 bool poll_cmd = ent->polling;
907 int alloc_ret;
908 int cmd_mode;
909
910 complete(&ent->handling);
911 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
912 down(sem);
913 if (!ent->page_queue) {
914 alloc_ret = cmd_alloc_index(cmd);
915 if (alloc_ret < 0) {
916 mlx5_core_err_rl(dev, "failed to allocate command entry\n");
917 if (ent->callback) {
918 ent->callback(-EAGAIN, ent->context);
919 mlx5_free_cmd_msg(dev, ent->out);
920 free_msg(dev, ent->in);
921 cmd_ent_put(ent);
922 } else {
923 ent->ret = -EAGAIN;
924 complete(&ent->done);
925 }
926 up(sem);
927 return;
928 }
929 ent->idx = alloc_ret;
930 } else {
931 ent->idx = cmd->max_reg_cmds;
932 spin_lock_irqsave(&cmd->alloc_lock, flags);
933 clear_bit(ent->idx, &cmd->bitmask);
934 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
935 }
936
937 cmd->ent_arr[ent->idx] = ent;
938 lay = get_inst(cmd, ent->idx);
939 ent->lay = lay;
940 memset(lay, 0, sizeof(*lay));
941 memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
942 ent->op = be32_to_cpu(lay->in[0]) >> 16;
943 if (ent->in->next)
944 lay->in_ptr = cpu_to_be64(ent->in->next->dma);
945 lay->inlen = cpu_to_be32(ent->in->len);
946 if (ent->out->next)
947 lay->out_ptr = cpu_to_be64(ent->out->next->dma);
948 lay->outlen = cpu_to_be32(ent->out->len);
949 lay->type = MLX5_PCI_CMD_XPORT;
950 lay->token = ent->token;
951 lay->status_own = CMD_OWNER_HW;
952 set_signature(ent, !cmd->checksum_disabled);
953 dump_command(dev, ent, 1);
954 ent->ts1 = ktime_get_ns();
955 cmd_mode = cmd->mode;
956
957 if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
958 cmd_ent_get(ent);
959 set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
960
961 /* Skip sending command to fw if internal error */
962 if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
963 u8 status = 0;
964 u32 drv_synd;
965
966 ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
967 MLX5_SET(mbox_out, ent->out, status, status);
968 MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
969
970 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
971 return;
972 }
973
974 cmd_ent_get(ent); /* for the _real_ FW event on completion */
975 /* ring doorbell after the descriptor is valid */
976 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
977 wmb();
978 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
979 /* if not in polling don't use ent after this point */
980 if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
981 poll_timeout(ent);
982 /* make sure we read the descriptor after ownership is SW */
983 rmb();
984 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
985 }
986 }
987
988 static const char *deliv_status_to_str(u8 status)
989 {
990 switch (status) {
991 case MLX5_CMD_DELIVERY_STAT_OK:
992 return "no errors";
993 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
994 return "signature error";
995 case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
996 return "token error";
997 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
998 return "bad block number";
999 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
1000 return "output pointer not aligned to block size";
1001 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
1002 return "input pointer not aligned to block size";
1003 case MLX5_CMD_DELIVERY_STAT_FW_ERR:
1004 return "firmware internal error";
1005 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
1006 return "command input length error";
1007 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
1008 return "command output length error";
1009 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
1010 return "reserved fields not cleared";
1011 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
1012 return "bad command descriptor type";
1013 default:
1014 return "unknown status code";
1015 }
1016 }
1017
1018 enum {
1019 MLX5_CMD_TIMEOUT_RECOVER_MSEC = 5 * 1000,
1020 };
1021
1022 static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
1023 struct mlx5_cmd_work_ent *ent)
1024 {
1025 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);
1026
1027 mlx5_cmd_eq_recover(dev);
1028
1029 /* Re-wait on the ent->done after executing the recovery flow. If the
1030 * recovery flow (or any other recovery flow running simultaneously)
1031 * has recovered an EQE, it should cause the entry to be completed by
1032 * the command interface.
1033 */
1034 if (wait_for_completion_timeout(&ent->done, timeout)) {
1035 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
1036 mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
1037 return;
1038 }
1039
1040 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
1041 mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
1042
1043 ent->ret = -ETIMEDOUT;
1044 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
1045 }
1046
1047 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
1048 {
1049 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
1050 struct mlx5_cmd *cmd = &dev->cmd;
1051 int err;
1052
1053 if (!wait_for_completion_timeout(&ent->handling, timeout) &&
1054 cancel_work_sync(&ent->work)) {
1055 ent->ret = -ECANCELED;
1056 goto out_err;
1057 }
1058 if (cmd->mode == CMD_MODE_POLLING || ent->polling)
1059 wait_for_completion(&ent->done);
1060 else if (!wait_for_completion_timeout(&ent->done, timeout))
1061 wait_func_handle_exec_timeout(dev, ent);
1062
1063 out_err:
1064 err = ent->ret;
1065
1066 if (err == -ETIMEDOUT) {
1067 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
1068 mlx5_command_str(msg_to_opcode(ent->in)),
1069 msg_to_opcode(ent->in));
1070 } else if (err == -ECANCELED) {
1071 mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
1072 mlx5_command_str(msg_to_opcode(ent->in)),
1073 msg_to_opcode(ent->in));
1074 }
1075 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
1076 err, deliv_status_to_str(ent->status), ent->status);
1077
1078 return err;
1079 }
1080
1081 /* Notes:
1082 * 1. Callback functions may not sleep
1083 * 2. page queue commands do not support asynchronous completion
1084 */
1085 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
1086 struct mlx5_cmd_msg *out, void *uout, int uout_size,
1087 mlx5_cmd_cbk_t callback,
1088 void *context, int page_queue, u8 *status,
1089 u8 token, bool force_polling)
1090 {
1091 struct mlx5_cmd *cmd = &dev->cmd;
1092 struct mlx5_cmd_work_ent *ent;
1093 struct mlx5_cmd_stats *stats;
1094 int err = 0;
1095 s64 ds;
1096 u16 op;
1097
1098 if (callback && page_queue)
1099 return -EINVAL;
1100
1101 ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
1102 callback, context, page_queue);
1103 if (IS_ERR(ent))
1104 return PTR_ERR(ent);
1105
1106 /* put for this ent is when consumed, depending on the use case
1107 * 1) (!callback) blocking flow: by caller after wait_func completes
1108 * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
1109 */
1110
1111 ent->token = token;
1112 ent->polling = force_polling;
1113
1114 init_completion(&ent->handling);
1115 if (!callback)
1116 init_completion(&ent->done);
1117
1118 INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
1119 INIT_WORK(&ent->work, cmd_work_handler);
1120 if (page_queue) {
1121 cmd_work_handler(&ent->work);
1122 } else if (!queue_work(cmd->wq, &ent->work)) {
1123 mlx5_core_warn(dev, "failed to queue work\n");
1124 err = -ENOMEM;
1125 goto out_free;
1126 }
1127
1128 if (callback)
1129 goto out; /* mlx5_cmd_comp_handler() will put(ent) */
1130
1131 err = wait_func(dev, ent);
1132 if (err == -ETIMEDOUT || err == -ECANCELED)
1133 goto out_free;
1134
1135 ds = ent->ts2 - ent->ts1;
1136 op = MLX5_GET(mbox_in, in->first.data, opcode);
1137 if (op < MLX5_CMD_OP_MAX) {
1138 stats = &cmd->stats[op];
1139 spin_lock_irq(&stats->lock);
1140 stats->sum += ds;
1141 ++stats->n;
1142 spin_unlock_irq(&stats->lock);
1143 }
1144 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
1145 "fw exec time for %s is %lld nsec\n",
1146 mlx5_command_str(op), ds);
1147 *status = ent->status;
1148
1149 out_free:
1150 cmd_ent_put(ent);
1151 out:
1152 return err;
1153 }
1154
1155 static ssize_t dbg_write(struct file *filp, const char __user *buf,
1156 size_t count, loff_t *pos)
1157 {
1158 struct mlx5_core_dev *dev = filp->private_data;
1159 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1160 char lbuf[3];
1161 int err;
1162
1163 if (!dbg->in_msg || !dbg->out_msg)
1164 return -ENOMEM;
1165
1166 if (count < sizeof(lbuf) - 1)
1167 return -EINVAL;
1168
1169 if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
1170 return -EFAULT;
1171
1172 lbuf[sizeof(lbuf) - 1] = 0;
1173
1174 if (strcmp(lbuf, "go"))
1175 return -EINVAL;
1176
1177 err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);
1178
1179 return err ? err : count;
1180 }
1181
1182 static const struct file_operations fops = {
1183 .owner = THIS_MODULE,
1184 .open = simple_open,
1185 .write = dbg_write,
1186 };
1187
1188 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
1189 u8 token)
1190 {
1191 struct mlx5_cmd_prot_block *block;
1192 struct mlx5_cmd_mailbox *next;
1193 int copy;
1194
1195 if (!to || !from)
1196 return -ENOMEM;
1197
1198 copy = min_t(int, size, sizeof(to->first.data));
1199 memcpy(to->first.data, from, copy);
1200 size -= copy;
1201 from += copy;
1202
1203 next = to->next;
1204 while (size) {
1205 if (!next) {
1206 /* this is a BUG */
1207 return -ENOMEM;
1208 }
1209
1210 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
1211 block = next->buf;
1212 memcpy(block->data, from, copy);
1213 from += copy;
1214 size -= copy;
1215 block->token = token;
1216 next = next->next;
1217 }
1218
1219 return 0;
1220 }
1221
1222 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
1223 {
1224 struct mlx5_cmd_prot_block *block;
1225 struct mlx5_cmd_mailbox *next;
1226 int copy;
1227
1228 if (!to || !from)
1229 return -ENOMEM;
1230
1231 copy = min_t(int, size, sizeof(from->first.data));
1232 memcpy(to, from->first.data, copy);
1233 size -= copy;
1234 to += copy;
1235
1236 next = from->next;
1237 while (size) {
1238 if (!next) {
1239 /* this is a BUG */
1240 return -ENOMEM;
1241 }
1242
1243 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
1244 block = next->buf;
1245
1246 memcpy(to, block->data, copy);
1247 to += copy;
1248 size -= copy;
1249 next = next->next;
1250 }
1251
1252 return 0;
1253 }
1254
1255 static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
1256 gfp_t flags)
1257 {
1258 struct mlx5_cmd_mailbox *mailbox;
1259
1260 mailbox = kmalloc(sizeof(*mailbox), flags);
1261 if (!mailbox)
1262 return ERR_PTR(-ENOMEM);
1263
1264 mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
1265 &mailbox->dma);
1266 if (!mailbox->buf) {
1267 mlx5_core_dbg(dev, "failed allocation\n");
1268 kfree(mailbox);
1269 return ERR_PTR(-ENOMEM);
1270 }
1271 mailbox->next = NULL;
1272
1273 return mailbox;
1274 }
1275
1276 static void free_cmd_box(struct mlx5_core_dev *dev,
1277 struct mlx5_cmd_mailbox *mailbox)
1278 {
1279 dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
1280 kfree(mailbox);
1281 }
1282
1283 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
1284 gfp_t flags, int size,
1285 u8 token)
1286 {
1287 struct mlx5_cmd_mailbox *tmp, *head = NULL;
1288 struct mlx5_cmd_prot_block *block;
1289 struct mlx5_cmd_msg *msg;
1290 int err;
1291 int n;
1292 int i;
1293
1294 msg = kzalloc(sizeof(*msg), flags);
1295 if (!msg)
1296 return ERR_PTR(-ENOMEM);
1297
1298 msg->len = size;
1299 n = mlx5_calc_cmd_blocks(msg);
1300
1301 for (i = 0; i < n; i++) {
1302 tmp = alloc_cmd_box(dev, flags);
1303 if (IS_ERR(tmp)) {
1304 mlx5_core_warn(dev, "failed allocating block\n");
1305 err = PTR_ERR(tmp);
1306 goto err_alloc;
1307 }
1308
1309 block = tmp->buf;
1310 tmp->next = head;
1311 block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
1312 block->block_num = cpu_to_be32(n - i - 1);
1313 block->token = token;
1314 head = tmp;
1315 }
1316 msg->next = head;
1317 return msg;
1318
1319 err_alloc:
1320 while (head) {
1321 tmp = head->next;
1322 free_cmd_box(dev, head);
1323 head = tmp;
1324 }
1325 kfree(msg);
1326
1327 return ERR_PTR(err);
1328 }
1329
1330 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
1331 struct mlx5_cmd_msg *msg)
1332 {
1333 struct mlx5_cmd_mailbox *head = msg->next;
1334 struct mlx5_cmd_mailbox *next;
1335
1336 while (head) {
1337 next = head->next;
1338 free_cmd_box(dev, head);
1339 head = next;
1340 }
1341 kfree(msg);
1342 }
1343
1344 static ssize_t data_write(struct file *filp, const char __user *buf,
1345 size_t count, loff_t *pos)
1346 {
1347 struct mlx5_core_dev *dev = filp->private_data;
1348 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1349 void *ptr;
1350
1351 if (*pos != 0)
1352 return -EINVAL;
1353
1354 kfree(dbg->in_msg);
1355 dbg->in_msg = NULL;
1356 dbg->inlen = 0;
1357 ptr = memdup_user(buf, count);
1358 if (IS_ERR(ptr))
1359 return PTR_ERR(ptr);
1360 dbg->in_msg = ptr;
1361 dbg->inlen = count;
1362
1363 *pos = count;
1364
1365 return count;
1366 }
1367
1368 static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
1369 loff_t *pos)
1370 {
1371 struct mlx5_core_dev *dev = filp->private_data;
1372 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1373
1374 if (!dbg->out_msg)
1375 return -ENOMEM;
1376
1377 return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
1378 dbg->outlen);
1379 }
1380
1381 static const struct file_operations dfops = {
1382 .owner = THIS_MODULE,
1383 .open = simple_open,
1384 .write = data_write,
1385 .read = data_read,
1386 };
1387
1388 static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
1389 loff_t *pos)
1390 {
1391 struct mlx5_core_dev *dev = filp->private_data;
1392 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1393 char outlen[8];
1394 int err;
1395
1396 err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
1397 if (err < 0)
1398 return err;
1399
1400 return simple_read_from_buffer(buf, count, pos, outlen, err);
1401 }
1402
1403 static ssize_t outlen_write(struct file *filp, const char __user *buf,
1404 size_t count, loff_t *pos)
1405 {
1406 struct mlx5_core_dev *dev = filp->private_data;
1407 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1408 char outlen_str[8] = {0};
1409 int outlen;
1410 void *ptr;
1411 int err;
1412
1413 if (*pos != 0 || count > 6)
1414 return -EINVAL;
1415
1416 kfree(dbg->out_msg);
1417 dbg->out_msg = NULL;
1418 dbg->outlen = 0;
1419
1420 if (copy_from_user(outlen_str, buf, count))
1421 return -EFAULT;
1422
1423 err = sscanf(outlen_str, "%d", &outlen);
1424 if (err < 0)
1425 return err;
1426
1427 ptr = kzalloc(outlen, GFP_KERNEL);
1428 if (!ptr)
1429 return -ENOMEM;
1430
1431 dbg->out_msg = ptr;
1432 dbg->outlen = outlen;
1433
1434 *pos = count;
1435
1436 return count;
1437 }
1438
1439 static const struct file_operations olfops = {
1440 .owner = THIS_MODULE,
1441 .open = simple_open,
1442 .write = outlen_write,
1443 .read = outlen_read,
1444 };
1445
1446 static void set_wqname(struct mlx5_core_dev *dev)
1447 {
1448 struct mlx5_cmd *cmd = &dev->cmd;
1449
1450 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
1451 dev_name(dev->device));
1452 }
1453
1454 static void clean_debug_files(struct mlx5_core_dev *dev)
1455 {
1456 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1457
1458 if (!mlx5_debugfs_root)
1459 return;
1460
1461 mlx5_cmdif_debugfs_cleanup(dev);
1462 debugfs_remove_recursive(dbg->dbg_root);
1463 }
1464
1465 static void create_debugfs_files(struct mlx5_core_dev *dev)
1466 {
1467 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1468
1469 dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
1470
1471 debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
1472 debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
1473 debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
1474 debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
1475 debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
1476
1477 mlx5_cmdif_debugfs_init(dev);
1478 }
1479
1480 void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
1481 {
1482 struct mlx5_cmd *cmd = &dev->cmd;
1483 int i;
1484
1485 for (i = 0; i < cmd->max_reg_cmds; i++)
1486 down(&cmd->sem);
1487 down(&cmd->pages_sem);
1488
1489 cmd->allowed_opcode = opcode;
1490
1491 up(&cmd->pages_sem);
1492 for (i = 0; i < cmd->max_reg_cmds; i++)
1493 up(&cmd->sem);
1494 }
1495
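/* Added note (not in the original source): quiesce the command interface by
 * taking every regular slot plus the page queue slot, switch between polling
 * and events mode, then release them.
 */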
1496 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
1497 {
1498 struct mlx5_cmd *cmd = &dev->cmd;
1499 int i;
1500
1501 for (i = 0; i < cmd->max_reg_cmds; i++)
1502 down(&cmd->sem);
1503 down(&cmd->pages_sem);
1504
1505 cmd->mode = mode;
1506
1507 up(&cmd->pages_sem);
1508 for (i = 0; i < cmd->max_reg_cmds; i++)
1509 up(&cmd->sem);
1510 }
1511
1512 static int cmd_comp_notifier(struct notifier_block *nb,
1513 unsigned long type, void *data)
1514 {
1515 struct mlx5_core_dev *dev;
1516 struct mlx5_cmd *cmd;
1517 struct mlx5_eqe *eqe;
1518
1519 cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
1520 dev = container_of(cmd, struct mlx5_core_dev, cmd);
1521 eqe = data;
1522
1523 mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
1524
1525 return NOTIFY_OK;
1526 }
1527 void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
1528 {
1529 MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
1530 mlx5_eq_notifier_register(dev, &dev->cmd.nb);
1531 mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
1532 }
1533
1534 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
1535 {
1536 mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
1537 mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
1538 }
1539
1540 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1541 {
1542 unsigned long flags;
1543
1544 if (msg->parent) {
1545 spin_lock_irqsave(&msg->parent->lock, flags);
1546 list_add_tail(&msg->list, &msg->parent->head);
1547 spin_unlock_irqrestore(&msg->parent->lock, flags);
1548 } else {
1549 mlx5_free_cmd_msg(dev, msg);
1550 }
1551 }
1552
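/* Added note (not in the original source): completion handler shared by the
 * EQ notifier, polling mode and the forced (triggered) path.  For every bit
 * set in @vec it copies the inline output back into the output message,
 * records the completion status, then either runs the async callback
 * (updating per-opcode statistics and freeing the messages) or completes
 * ent->done so wait_func() can return, dropping entry references as it goes.
 */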
1553 static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1554 {
1555 struct mlx5_cmd *cmd = &dev->cmd;
1556 struct mlx5_cmd_work_ent *ent;
1557 mlx5_cmd_cbk_t callback;
1558 void *context;
1559 int err;
1560 int i;
1561 s64 ds;
1562 struct mlx5_cmd_stats *stats;
1563 unsigned long flags;
1564 unsigned long vector;
1565
1566 /* there can be at most 32 command queues */
1567 vector = vec & 0xffffffff;
1568 for (i = 0; i < (1 << cmd->log_sz); i++) {
1569 if (test_bit(i, &vector)) {
1570 ent = cmd->ent_arr[i];
1571
1572 /* if we already completed the command, ignore it */
1573 if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
1574 &ent->state)) {
1575 /* only real completion can free the cmd slot */
1576 if (!forced) {
1577 mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1578 ent->idx);
1579 cmd_ent_put(ent);
1580 }
1581 continue;
1582 }
1583
1584 if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
1585 cmd_ent_put(ent); /* timeout work was canceled */
1586
1587 if (!forced || /* Real FW completion */
1588 pci_channel_offline(dev->pdev) || /* FW is inaccessible */
1589 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1590 cmd_ent_put(ent);
1591
1592 ent->ts2 = ktime_get_ns();
1593 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
1594 dump_command(dev, ent, 0);
1595 if (!ent->ret) {
1596 if (!cmd->checksum_disabled)
1597 ent->ret = verify_signature(ent);
1598 else
1599 ent->ret = 0;
1600 if (vec & MLX5_TRIGGERED_CMD_COMP)
1601 ent->status = MLX5_DRIVER_STATUS_ABORTED;
1602 else
1603 ent->status = ent->lay->status_own >> 1;
1604
1605 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1606 ent->ret, deliv_status_to_str(ent->status), ent->status);
1607 }
1608
1609 if (ent->callback) {
1610 ds = ent->ts2 - ent->ts1;
1611 if (ent->op < MLX5_CMD_OP_MAX) {
1612 stats = &cmd->stats[ent->op];
1613 spin_lock_irqsave(&stats->lock, flags);
1614 stats->sum += ds;
1615 ++stats->n;
1616 spin_unlock_irqrestore(&stats->lock, flags);
1617 }
1618
1619 callback = ent->callback;
1620 context = ent->context;
1621 err = ent->ret;
1622 if (!err) {
1623 err = mlx5_copy_from_msg(ent->uout,
1624 ent->out,
1625 ent->uout_size);
1626
1627 err = err ? err : mlx5_cmd_check(dev,
1628 ent->in->first.data,
1629 ent->uout);
1630 }
1631
1632 mlx5_free_cmd_msg(dev, ent->out);
1633 free_msg(dev, ent->in);
1634
1635 err = err ? err : ent->status;
1636 /* final consumer is done, release ent */
1637 cmd_ent_put(ent);
1638 callback(err, context);
1639 } else {
1640 /* release wait_func() so mlx5_cmd_invoke()
1641 * can make the final ent_put()
1642 */
1643 complete(&ent->done);
1644 }
1645 }
1646 }
1647 }
1648
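/* Added note (not in the original source): force completions for every
 * command currently holding a queue slot, e.g. on internal error.  Entries
 * are pinned with cmd_ent_get() under alloc_lock before the forced
 * completion runs, so they cannot be freed underneath the handler.
 */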
1649 void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
1650 {
1651 struct mlx5_cmd *cmd = &dev->cmd;
1652 unsigned long bitmask;
1653 unsigned long flags;
1654 u64 vector;
1655 int i;
1656
1657 /* wait for pending handlers to complete */
1658 mlx5_eq_synchronize_cmd_irq(dev);
1659 spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
1660 vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
1661 if (!vector)
1662 goto no_trig;
1663
1664 bitmask = vector;
1665 /* we must increment the allocated entries refcount before triggering the completions
1666 * to guarantee pending commands will not get freed in the meanwhile.
1667 * For that reason, it also has to be done inside the alloc_lock.
1668 */
1669 for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
1670 cmd_ent_get(cmd->ent_arr[i]);
1671 vector |= MLX5_TRIGGERED_CMD_COMP;
1672 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
1673
1674 mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
1675 mlx5_cmd_comp_handler(dev, vector, true);
1676 for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
1677 cmd_ent_put(cmd->ent_arr[i]);
1678 return;
1679
1680 no_trig:
1681 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
1682 }
1683
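/* Added note (not in the original source): drain the command interface by
 * repeatedly triggering forced completions until all semaphores can be
 * taken, then release them to unlock the cmdif again.
 */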
1684 void mlx5_cmd_flush(struct mlx5_core_dev *dev)
1685 {
1686 struct mlx5_cmd *cmd = &dev->cmd;
1687 int i;
1688
1689 for (i = 0; i < cmd->max_reg_cmds; i++)
1690 while (down_trylock(&cmd->sem))
1691 mlx5_cmd_trigger_completions(dev);
1692
1693 while (down_trylock(&cmd->pages_sem))
1694 mlx5_cmd_trigger_completions(dev);
1695
1696 /* Unlock cmdif */
1697 up(&cmd->pages_sem);
1698 for (i = 0; i < cmd->max_reg_cmds; i++)
1699 up(&cmd->sem);
1700 }
1701
1702 static int status_to_err(u8 status)
1703 {
1704 switch (status) {
1705 case MLX5_CMD_DELIVERY_STAT_OK:
1706 case MLX5_DRIVER_STATUS_ABORTED:
1707 return 0;
1708 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
1709 case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
1710 return -EBADR;
1711 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
1712 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
1713 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
1714 return -EFAULT; /* Bad address */
1715 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
1716 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
1717 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
1718 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
1719 return -ENOMSG;
1720 case MLX5_CMD_DELIVERY_STAT_FW_ERR:
1721 return -EIO;
1722 default:
1723 return -EINVAL;
1724 }
1725 }
1726
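/* Added note (not in the original source): pick an input mailbox chain from
 * the smallest command cache that fits in_size; requests of 16 bytes or less,
 * and cache misses, fall back to a fresh allocation.
 */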
1727 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
1728 gfp_t gfp)
1729 {
1730 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
1731 struct cmd_msg_cache *ch = NULL;
1732 struct mlx5_cmd *cmd = &dev->cmd;
1733 int i;
1734
1735 if (in_size <= 16)
1736 goto cache_miss;
1737
1738 for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
1739 ch = &cmd->cache[i];
1740 if (in_size > ch->max_inbox_size)
1741 continue;
1742 spin_lock_irq(&ch->lock);
1743 if (list_empty(&ch->head)) {
1744 spin_unlock_irq(&ch->lock);
1745 continue;
1746 }
1747 msg = list_entry(ch->head.next, typeof(*msg), list);
1748 /* For messages taken from a cache, the real size of the
1749 * request must be set explicitly.
1750 */
1751 msg->len = in_size;
1752 list_del(&msg->list);
1753 spin_unlock_irq(&ch->lock);
1754 break;
1755 }
1756
1757 if (!IS_ERR(msg))
1758 return msg;
1759
1760 cache_miss:
1761 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
1762 return msg;
1763 }
1764
1765 static int is_manage_pages(void *in)
1766 {
1767 return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
1768 }
1769
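/* Common entry point for command execution: check that the interface is up
 * and the opcode is currently allowed, copy the caller's input into a
 * command message, allocate an output message, and hand everything to
 * mlx5_cmd_invoke().  For blocking calls the output is copied back and the
 * messages are freed here; for callback-driven calls that happens in the
 * completion handler.
 */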
1770 static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1771 int out_size, mlx5_cmd_cbk_t callback, void *context,
1772 bool force_polling)
1773 {
1774 struct mlx5_cmd_msg *inb;
1775 struct mlx5_cmd_msg *outb;
1776 int pages_queue;
1777 gfp_t gfp;
1778 int err;
1779 u8 status = 0;
1780 u32 drv_synd;
1781 u16 opcode;
1782 u8 token;
1783
1784 opcode = MLX5_GET(mbox_in, in, opcode);
1785 if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
1786 err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
1787 MLX5_SET(mbox_out, out, status, status);
1788 MLX5_SET(mbox_out, out, syndrome, drv_synd);
1789 return err;
1790 }
1791
1792 pages_queue = is_manage_pages(in);
1793 gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
1794
1795 inb = alloc_msg(dev, in_size, gfp);
1796 if (IS_ERR(inb)) {
1797 err = PTR_ERR(inb);
1798 return err;
1799 }
1800
1801 token = alloc_token(&dev->cmd);
1802
1803 err = mlx5_copy_to_msg(inb, in, in_size, token);
1804 if (err) {
1805 mlx5_core_warn(dev, "err %d\n", err);
1806 goto out_in;
1807 }
1808
1809 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
1810 if (IS_ERR(outb)) {
1811 err = PTR_ERR(outb);
1812 goto out_in;
1813 }
1814
1815 err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
1816 pages_queue, &status, token, force_polling);
1817 if (err)
1818 goto out_out;
1819
1820 mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
1821 if (status) {
1822 err = status_to_err(status);
1823 goto out_out;
1824 }
1825
1826 if (!callback)
1827 err = mlx5_copy_from_msg(out, outb, out_size);
1828
1829 out_out:
1830 if (!callback)
1831 mlx5_free_cmd_msg(dev, outb);
1832
1833 out_in:
1834 if (!callback)
1835 free_msg(dev, inb);
1836 return err;
1837 }
1838
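/* Typical caller pattern (an illustrative sketch only, not taken from this
 * file; ENABLE_HCA is just an example opcode):
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */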
1839 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1840 int out_size)
1841 {
1842 int err;
1843
1844 err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
1845 return err ? : mlx5_cmd_check(dev, in, out);
1846 }
1847 EXPORT_SYMBOL(mlx5_cmd_exec);
1848
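/* An async context ties a group of callback-based commands together so they
 * can be waited for collectively: initialize it once, issue any number of
 * mlx5_cmd_exec_cb() calls against it, and finally call
 * mlx5_cmd_cleanup_async_ctx() to wait for all callbacks to finish.
 */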
1849 void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
1850 struct mlx5_async_ctx *ctx)
1851 {
1852 ctx->dev = dev;
1853 /* Starts at 1 to avoid doing wake_up if we are not cleaning up */
1854 atomic_set(&ctx->num_inflight, 1);
1855 init_waitqueue_head(&ctx->wait);
1856 }
1857 EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
1858
1859 /**
1860 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
1861 * @ctx: The ctx to clean
1862 *
1863 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
1864 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
1865 * the call to mlx5_cmd_cleanup_async_ctx().
1866 */
1867 void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
1868 {
1869 atomic_dec(&ctx->num_inflight);
1870 wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
1871 }
1872 EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
1873
1874 static void mlx5_cmd_exec_cb_handler(int status, void *_work)
1875 {
1876 struct mlx5_async_work *work = _work;
1877 struct mlx5_async_ctx *ctx = work->ctx;
1878
1879 work->user_callback(status, work);
1880 if (atomic_dec_and_test(&ctx->num_inflight))
1881 wake_up(&ctx->wait);
1882 }
1883
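/* Illustrative async usage (a sketch only; struct my_req and my_done() are
 * hypothetical names and destroy_cq is used purely as an example command):
 *
 *	struct my_req {
 *		struct mlx5_async_work cb_work;
 *		u32 out[MLX5_ST_SZ_DW(destroy_cq_out)];
 *	};
 *
 *	static void my_done(int status, struct mlx5_async_work *work)
 *	{
 *		struct my_req *req = container_of(work, struct my_req, cb_work);
 *
 *		... inspect status and req->out, then free req ...
 *	}
 *
 *	err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), req->out, sizeof(req->out),
 *			       my_done, &req->cb_work);
 */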
1884 int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
1885 void *out, int out_size, mlx5_async_cbk_t callback,
1886 struct mlx5_async_work *work)
1887 {
1888 int ret;
1889
1890 work->ctx = ctx;
1891 work->user_callback = callback;
1892 if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
1893 return -EIO;
1894 ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
1895 mlx5_cmd_exec_cb_handler, work, false);
1896 if (ret && atomic_dec_and_test(&ctx->num_inflight))
1897 wake_up(&ctx->wait);
1898
1899 return ret;
1900 }
1901 EXPORT_SYMBOL(mlx5_cmd_exec_cb);
1902
1903 int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
1904 void *out, int out_size)
1905 {
1906 int err;
1907
1908 err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
1909
1910 return err ? : mlx5_cmd_check(dev, in, out);
1911 }
1912 EXPORT_SYMBOL(mlx5_cmd_exec_polling);
1913
1914 static void destroy_msg_cache(struct mlx5_core_dev *dev)
1915 {
1916 struct cmd_msg_cache *ch;
1917 struct mlx5_cmd_msg *msg;
1918 struct mlx5_cmd_msg *n;
1919 int i;
1920
1921 for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
1922 ch = &dev->cmd.cache[i];
1923 list_for_each_entry_safe(msg, n, &ch->head, list) {
1924 list_del(&msg->list);
1925 mlx5_free_cmd_msg(dev, msg);
1926 }
1927 }
1928 }
1929
1930 static unsigned int cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
1931 512, 32, 16, 8, 2
1932 };
1933
1934 static unsigned int cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
1935 16 + MLX5_CMD_DATA_BLOCK_SIZE,
1936 16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
1937 16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
1938 16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
1939 16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
1940 };
1941
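/* Assuming MLX5_CMD_DATA_BLOCK_SIZE is 512 bytes, the five caches hold
 * messages of up to 528, 1040, 8208, 131088 and 262160 bytes respectively,
 * with more entries preallocated for the smaller, more frequently used
 * sizes (512, 32, 16, 8 and 2 entries).
 */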
1942 static void create_msg_cache(struct mlx5_core_dev *dev)
1943 {
1944 struct mlx5_cmd *cmd = &dev->cmd;
1945 struct cmd_msg_cache *ch;
1946 struct mlx5_cmd_msg *msg;
1947 int i;
1948 int k;
1949
1950 /* Initialize and fill the caches with initial entries */
1951 for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
1952 ch = &cmd->cache[k];
1953 spin_lock_init(&ch->lock);
1954 INIT_LIST_HEAD(&ch->head);
1955 ch->num_ent = cmd_cache_num_ent[k];
1956 ch->max_inbox_size = cmd_cache_ent_size[k];
1957 for (i = 0; i < ch->num_ent; i++) {
1958 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
1959 ch->max_inbox_size, 0);
1960 if (IS_ERR(msg))
1961 break;
1962 msg->parent = ch;
1963 list_add_tail(&msg->list, &ch->head);
1964 }
1965 }
1966 }
1967
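/* Allocate the 4K command page shared with firmware.  A single page-sized
 * coherent buffer is tried first; if it does not come back 4K aligned, it
 * is freed and replaced with a 2 * 4K - 1 byte allocation, which is large
 * enough to always contain a properly aligned 4K window for both the CPU
 * and DMA addresses.
 */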
1968 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1969 {
1970 cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
1971 &cmd->alloc_dma, GFP_KERNEL);
1972 if (!cmd->cmd_alloc_buf)
1973 return -ENOMEM;
1974
1975 /* make sure it is aligned to 4K */
1976 if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
1977 cmd->cmd_buf = cmd->cmd_alloc_buf;
1978 cmd->dma = cmd->alloc_dma;
1979 cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
1980 return 0;
1981 }
1982
1983 dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
1984 cmd->alloc_dma);
1985 cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
1986 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
1987 &cmd->alloc_dma, GFP_KERNEL);
1988 if (!cmd->cmd_alloc_buf)
1989 return -ENOMEM;
1990
1991 cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
1992 cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
1993 cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
1994 return 0;
1995 }
1996
1997 static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1998 {
1999 dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
2000 cmd->alloc_dma);
2001 }
2002
2003 static u16 cmdif_rev(struct mlx5_core_dev *dev)
2004 {
2005 return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
2006 }
2007
2008 int mlx5_cmd_init(struct mlx5_core_dev *dev)
2009 {
2010 int size = sizeof(struct mlx5_cmd_prot_block);
2011 int align = roundup_pow_of_two(size);
2012 struct mlx5_cmd *cmd = &dev->cmd;
2013 u32 cmd_h, cmd_l;
2014 u16 cmd_if_rev;
2015 int err;
2016 int i;
2017
2018 memset(cmd, 0, sizeof(*cmd));
2019 cmd_if_rev = cmdif_rev(dev);
2020 if (cmd_if_rev != CMD_IF_REV) {
2021 mlx5_core_err(dev,
2022 "Driver cmdif rev(%d) differs from firmware's(%d)\n",
2023 CMD_IF_REV, cmd_if_rev);
2024 return -EINVAL;
2025 }
2026
2027 cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
2028 if (!cmd->stats)
2029 return -ENOMEM;
2030
2031 cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
2032 if (!cmd->pool) {
2033 err = -ENOMEM;
2034 goto dma_pool_err;
2035 }
2036
2037 err = alloc_cmd_page(dev, cmd);
2038 if (err)
2039 goto err_free_pool;
2040
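/* The low byte of cmdq_addr_l_sz encodes the queue geometry: bits 7:4 hold
 * log2 of the number of entries and bits 3:0 log2 of the entry stride,
 * e.g. log_sz 5 with log_stride 6 would describe 32 entries of 64 bytes
 * each, which must fit in the 4K command page.
 */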
2041 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
2042 cmd->log_sz = cmd_l >> 4 & 0xf;
2043 cmd->log_stride = cmd_l & 0xf;
2044 if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
2045 mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
2046 1 << cmd->log_sz);
2047 err = -EINVAL;
2048 goto err_free_page;
2049 }
2050
2051 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
2052 mlx5_core_err(dev, "command queue size overflow\n");
2053 err = -EINVAL;
2054 goto err_free_page;
2055 }
2056
2057 cmd->state = MLX5_CMDIF_STATE_DOWN;
2058 cmd->checksum_disabled = 1;
2059 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
2060 cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
2061
2062 cmd->cmdif_rev = cmdif_rev(dev);
2063 if (cmd->cmdif_rev > CMD_IF_REV) {
2064 mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
2065 CMD_IF_REV, cmd->cmdif_rev);
2066 err = -EOPNOTSUPP;
2067 goto err_free_page;
2068 }
2069
2070 spin_lock_init(&cmd->alloc_lock);
2071 spin_lock_init(&cmd->token_lock);
2072 for (i = 0; i < MLX5_CMD_OP_MAX; i++)
2073 spin_lock_init(&cmd->stats[i].lock);
2074
2075 sema_init(&cmd->sem, cmd->max_reg_cmds);
2076 sema_init(&cmd->pages_sem, 1);
2077
2078 cmd_h = (u32)((u64)(cmd->dma) >> 32);
2079 cmd_l = (u32)(cmd->dma);
2080 if (cmd_l & 0xfff) {
2081 mlx5_core_err(dev, "invalid command queue address\n");
2082 err = -ENOMEM;
2083 goto err_free_page;
2084 }
2085
2086 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
2087 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
2088
2089 /* Make sure firmware sees the complete address before we proceed */
2090 wmb();
2091
2092 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
2093
2094 cmd->mode = CMD_MODE_POLLING;
2095 cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
2096
2097 create_msg_cache(dev);
2098
2099 set_wqname(dev);
2100 cmd->wq = create_singlethread_workqueue(cmd->wq_name);
2101 if (!cmd->wq) {
2102 mlx5_core_err(dev, "failed to create command workqueue\n");
2103 err = -ENOMEM;
2104 goto err_cache;
2105 }
2106
2107 create_debugfs_files(dev);
2108
2109 return 0;
2110
2111 err_cache:
2112 destroy_msg_cache(dev);
2113
2114 err_free_page:
2115 free_cmd_page(dev, cmd);
2116
2117 err_free_pool:
2118 dma_pool_destroy(cmd->pool);
2119 dma_pool_err:
2120 kvfree(cmd->stats);
2121 return err;
2122 }
2123 EXPORT_SYMBOL(mlx5_cmd_init);
2124
2125 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
2126 {
2127 struct mlx5_cmd *cmd = &dev->cmd;
2128
2129 clean_debug_files(dev);
2130 destroy_workqueue(cmd->wq);
2131 destroy_msg_cache(dev);
2132 free_cmd_page(dev, cmd);
2133 dma_pool_destroy(cmd->pool);
2134 kvfree(cmd->stats);
2135 }
2136 EXPORT_SYMBOL(mlx5_cmd_cleanup);
2137
2138 void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
2139 enum mlx5_cmdif_state cmdif_state)
2140 {
2141 dev->cmd.state = cmdif_state;
2142 }
2143 EXPORT_SYMBOL(mlx5_cmd_set_state);
2144