1 /*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49 #include "mlx4_stats.h"
50
51 #define MLX4_MAC_VALID (1ull << 63)
52 #define MLX4_PF_COUNTERS_PER_PORT 2
53 #define MLX4_VF_COUNTERS_PER_PORT 1
54
55 struct mac_res {
56 struct list_head list;
57 u64 mac;
58 int ref_count;
59 u8 smac_index;
60 u8 port;
61 };
62
63 struct vlan_res {
64 struct list_head list;
65 u16 vlan;
66 int ref_count;
67 int vlan_index;
68 u8 port;
69 };
70
71 struct res_common {
72 struct list_head list;
73 struct rb_node node;
74 u64 res_id;
75 int owner;
76 int state;
77 int from_state;
78 int to_state;
79 int removing;
80 };
81
82 enum {
83 RES_ANY_BUSY = 1
84 };
85
86 struct res_gid {
87 struct list_head list;
88 u8 gid[16];
89 enum mlx4_protocol prot;
90 enum mlx4_steer_type steer;
91 u64 reg_id;
92 };
93
94 enum res_qp_states {
95 RES_QP_BUSY = RES_ANY_BUSY,
96
97 /* QP number was allocated */
98 RES_QP_RESERVED,
99
100 /* ICM memory for QP context was mapped */
101 RES_QP_MAPPED,
102
103 /* QP is in hw ownership */
104 RES_QP_HW
105 };
106
107 struct res_qp {
108 struct res_common com;
109 struct res_mtt *mtt;
110 struct res_cq *rcq;
111 struct res_cq *scq;
112 struct res_srq *srq;
113 struct list_head mcg_list;
114 spinlock_t mcg_spl;
115 int local_qpn;
116 atomic_t ref_count;
117 u32 qpc_flags;
118 /* saved qp params before VST enforcement in order to restore on VGT */
119 u8 sched_queue;
120 __be32 param3;
121 u8 vlan_control;
122 u8 fvl_rx;
123 u8 pri_path_fl;
124 u8 vlan_index;
125 u8 feup;
126 };
127
128 enum res_mtt_states {
129 RES_MTT_BUSY = RES_ANY_BUSY,
130 RES_MTT_ALLOCATED,
131 };
132
133 static inline const char *mtt_states_str(enum res_mtt_states state)
134 {
135 switch (state) {
136 case RES_MTT_BUSY: return "RES_MTT_BUSY";
137 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
138 default: return "Unknown";
139 }
140 }
141
142 struct res_mtt {
143 struct res_common com;
144 int order;
145 atomic_t ref_count;
146 };
147
148 enum res_mpt_states {
149 RES_MPT_BUSY = RES_ANY_BUSY,
150 RES_MPT_RESERVED,
151 RES_MPT_MAPPED,
152 RES_MPT_HW,
153 };
154
155 struct res_mpt {
156 struct res_common com;
157 struct res_mtt *mtt;
158 int key;
159 };
160
161 enum res_eq_states {
162 RES_EQ_BUSY = RES_ANY_BUSY,
163 RES_EQ_RESERVED,
164 RES_EQ_HW,
165 };
166
167 struct res_eq {
168 struct res_common com;
169 struct res_mtt *mtt;
170 };
171
172 enum res_cq_states {
173 RES_CQ_BUSY = RES_ANY_BUSY,
174 RES_CQ_ALLOCATED,
175 RES_CQ_HW,
176 };
177
178 struct res_cq {
179 struct res_common com;
180 struct res_mtt *mtt;
181 atomic_t ref_count;
182 };
183
184 enum res_srq_states {
185 RES_SRQ_BUSY = RES_ANY_BUSY,
186 RES_SRQ_ALLOCATED,
187 RES_SRQ_HW,
188 };
189
190 struct res_srq {
191 struct res_common com;
192 struct res_mtt *mtt;
193 struct res_cq *cq;
194 atomic_t ref_count;
195 };
196
197 enum res_counter_states {
198 RES_COUNTER_BUSY = RES_ANY_BUSY,
199 RES_COUNTER_ALLOCATED,
200 };
201
202 struct res_counter {
203 struct res_common com;
204 int port;
205 };
206
207 enum res_xrcdn_states {
208 RES_XRCD_BUSY = RES_ANY_BUSY,
209 RES_XRCD_ALLOCATED,
210 };
211
212 struct res_xrcdn {
213 struct res_common com;
214 int port;
215 };
216
217 enum res_fs_rule_states {
218 RES_FS_RULE_BUSY = RES_ANY_BUSY,
219 RES_FS_RULE_ALLOCATED,
220 };
221
222 struct res_fs_rule {
223 struct res_common com;
224 int qpn;
225 };
226
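/* Look up a tracked resource by its ID in the per-type red-black tree. */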
227 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
228 {
229 struct rb_node *node = root->rb_node;
230
231 while (node) {
232 struct res_common *res = container_of(node, struct res_common,
233 node);
234
235 if (res_id < res->res_id)
236 node = node->rb_left;
237 else if (res_id > res->res_id)
238 node = node->rb_right;
239 else
240 return res;
241 }
242 return NULL;
243 }
244
245 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
246 {
247 struct rb_node **new = &(root->rb_node), *parent = NULL;
248
249 /* Figure out where to put new node */
250 while (*new) {
251 struct res_common *this = container_of(*new, struct res_common,
252 node);
253
254 parent = *new;
255 if (res->res_id < this->res_id)
256 new = &((*new)->rb_left);
257 else if (res->res_id > this->res_id)
258 new = &((*new)->rb_right);
259 else
260 return -EEXIST;
261 }
262
263 /* Add new node and rebalance tree. */
264 rb_link_node(&res->node, parent, new);
265 rb_insert_color(&res->node, root);
266
267 return 0;
268 }
269
270 enum qp_transition {
271 QP_TRANS_INIT2RTR,
272 QP_TRANS_RTR2RTS,
273 QP_TRANS_RTS2RTS,
274 QP_TRANS_SQERR2RTS,
275 QP_TRANS_SQD2SQD,
276 QP_TRANS_SQD2RTS
277 };
278
279 /* For debug purposes */
280 static const char *resource_str(enum mlx4_resource rt)
281 {
282 switch (rt) {
283 case RES_QP: return "RES_QP";
284 case RES_CQ: return "RES_CQ";
285 case RES_SRQ: return "RES_SRQ";
286 case RES_MPT: return "RES_MPT";
287 case RES_MTT: return "RES_MTT";
288 case RES_MAC: return "RES_MAC";
289 case RES_VLAN: return "RES_VLAN";
290 case RES_EQ: return "RES_EQ";
291 case RES_COUNTER: return "RES_COUNTER";
292 case RES_FS_RULE: return "RES_FS_RULE";
293 case RES_XRCD: return "RES_XRCD";
294 default: return "Unknown resource type !!!";
295 }
296 }
297
298 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
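/*
 * Account a slave's request for 'count' resources of the given type
 * against its quota. The portion up to the slave's guaranteed amount is
 * drawn from the reserved pool; the remainder must fit in the free pool
 * without eating into other functions' reservations. On success the
 * allocator counters are updated; otherwise -EINVAL is returned.
 */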
299 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
300 enum mlx4_resource res_type, int count,
301 int port)
302 {
303 struct mlx4_priv *priv = mlx4_priv(dev);
304 struct resource_allocator *res_alloc =
305 &priv->mfunc.master.res_tracker.res_alloc[res_type];
306 int err = -EINVAL;
307 int allocated, free, reserved, guaranteed, from_free;
308 int from_rsvd;
309
310 if (slave > dev->persist->num_vfs)
311 return -EINVAL;
312
313 spin_lock(&res_alloc->alloc_lock);
314 allocated = (port > 0) ?
315 res_alloc->allocated[(port - 1) *
316 (dev->persist->num_vfs + 1) + slave] :
317 res_alloc->allocated[slave];
318 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
319 res_alloc->res_free;
320 reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
321 res_alloc->res_reserved;
322 guaranteed = res_alloc->guaranteed[slave];
323
324 if (allocated + count > res_alloc->quota[slave]) {
325 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
326 slave, port, resource_str(res_type), count,
327 allocated, res_alloc->quota[slave]);
328 goto out;
329 }
330
331 if (allocated + count <= guaranteed) {
332 err = 0;
333 from_rsvd = count;
334 } else {
335 /* portion may need to be obtained from free area */
336 if (guaranteed - allocated > 0)
337 from_free = count - (guaranteed - allocated);
338 else
339 from_free = count;
340
341 from_rsvd = count - from_free;
342
343 if (free - from_free >= reserved)
344 err = 0;
345 else
346 mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
347 slave, port, resource_str(res_type), free,
348 from_free, reserved);
349 }
350
351 if (!err) {
352 /* grant the request */
353 if (port > 0) {
354 res_alloc->allocated[(port - 1) *
355 (dev->persist->num_vfs + 1) + slave] += count;
356 res_alloc->res_port_free[port - 1] -= count;
357 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
358 } else {
359 res_alloc->allocated[slave] += count;
360 res_alloc->res_free -= count;
361 res_alloc->res_reserved -= from_rsvd;
362 }
363 }
364
365 out:
366 spin_unlock(&res_alloc->alloc_lock);
367 return err;
368 }
369
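/*
 * Return 'count' resources to the allocator, crediting back to the
 * reserved pool the portion that had been drawn from the slave's
 * guaranteed share.
 */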
370 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
371 enum mlx4_resource res_type, int count,
372 int port)
373 {
374 struct mlx4_priv *priv = mlx4_priv(dev);
375 struct resource_allocator *res_alloc =
376 &priv->mfunc.master.res_tracker.res_alloc[res_type];
377 int allocated, guaranteed, from_rsvd;
378
379 if (slave > dev->persist->num_vfs)
380 return;
381
382 spin_lock(&res_alloc->alloc_lock);
383
384 allocated = (port > 0) ?
385 res_alloc->allocated[(port - 1) *
386 (dev->persist->num_vfs + 1) + slave] :
387 res_alloc->allocated[slave];
388 guaranteed = res_alloc->guaranteed[slave];
389
390 if (allocated - count >= guaranteed) {
391 from_rsvd = 0;
392 } else {
393 /* portion may need to be returned to reserved area */
394 if (allocated - guaranteed > 0)
395 from_rsvd = count - (allocated - guaranteed);
396 else
397 from_rsvd = count;
398 }
399
400 if (port > 0) {
401 res_alloc->allocated[(port - 1) *
402 (dev->persist->num_vfs + 1) + slave] -= count;
403 res_alloc->res_port_free[port - 1] += count;
404 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
405 } else {
406 res_alloc->allocated[slave] -= count;
407 res_alloc->res_free += count;
408 res_alloc->res_reserved += from_rsvd;
409 }
410
411 spin_unlock(&res_alloc->alloc_lock);
412 return;
413 }
414
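/*
 * Split num_instances among all functions: each function is guaranteed
 * 1/(2 * (num_vfs + 1)) of the pool and may allocate up to half the pool
 * on top of its guarantee. For the PF, the free pool is initialized here,
 * and for MTTs the device's reserved MTTs are added to the PF's free
 * pool, guarantee and quota.
 */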
415 static inline void initialize_res_quotas(struct mlx4_dev *dev,
416 struct resource_allocator *res_alloc,
417 enum mlx4_resource res_type,
418 int vf, int num_instances)
419 {
420 res_alloc->guaranteed[vf] = num_instances /
421 (2 * (dev->persist->num_vfs + 1));
422 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
423 if (vf == mlx4_master_func_num(dev)) {
424 res_alloc->res_free = num_instances;
425 if (res_type == RES_MTT) {
426 /* reserved mtts will be taken out of the PF allocation */
427 res_alloc->res_free += dev->caps.reserved_mtts;
428 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
429 res_alloc->quota[vf] += dev->caps.reserved_mtts;
430 }
431 }
432 }
433
434 void mlx4_init_quotas(struct mlx4_dev *dev)
435 {
436 struct mlx4_priv *priv = mlx4_priv(dev);
437 int pf;
438
439 /* quotas for VFs are initialized in mlx4_slave_cap */
440 if (mlx4_is_slave(dev))
441 return;
442
443 if (!mlx4_is_mfunc(dev)) {
444 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
445 mlx4_num_reserved_sqps(dev);
446 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
447 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
448 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
449 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
450 return;
451 }
452
453 pf = mlx4_master_func_num(dev);
454 dev->quotas.qp =
455 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
456 dev->quotas.cq =
457 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
458 dev->quotas.srq =
459 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
460 dev->quotas.mtt =
461 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
462 dev->quotas.mpt =
463 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
464 }
465
466 static int
467 mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
468 struct resource_allocator *res_alloc,
469 int vf)
470 {
471 struct mlx4_active_ports actv_ports;
472 int ports, counters_guaranteed;
473
474 /* For master, only allocate according to the number of phys ports */
475 if (vf == mlx4_master_func_num(dev))
476 return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
477
478 /* calculate real number of ports for the VF */
479 actv_ports = mlx4_get_active_ports(dev, vf);
480 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
481 counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
482
483 /* If we do not have enough counters for this VF, do not
484 * allocate any for it. '-1' to reduce the sink counter.
485 */
486 if ((res_alloc->res_reserved + counters_guaranteed) >
487 (dev->caps.max_counters - 1))
488 return 0;
489
490 return counters_guaranteed;
491 }
492
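/*
 * Set up the master's resource tracker: per-slave resource lists and
 * mutexes, the per-type red-black trees, and the quota/guaranteed/
 * allocated arrays for every resource type and every function (PF + VFs),
 * including the per-port free and reserved pools for MACs and VLANs.
 */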
493 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
494 {
495 struct mlx4_priv *priv = mlx4_priv(dev);
496 int i, j;
497 int t;
498
499 priv->mfunc.master.res_tracker.slave_list =
500 kzalloc(dev->num_slaves * sizeof(struct slave_list),
501 GFP_KERNEL);
502 if (!priv->mfunc.master.res_tracker.slave_list)
503 return -ENOMEM;
504
505 for (i = 0 ; i < dev->num_slaves; i++) {
506 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
507 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
508 slave_list[i].res_list[t]);
509 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
510 }
511
512 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
513 dev->num_slaves);
514 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
515 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
516
517 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
518 struct resource_allocator *res_alloc =
519 &priv->mfunc.master.res_tracker.res_alloc[i];
520 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
521 sizeof(int), GFP_KERNEL);
522 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
523 sizeof(int), GFP_KERNEL);
524 if (i == RES_MAC || i == RES_VLAN)
525 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
526 (dev->persist->num_vfs
527 + 1) *
528 sizeof(int), GFP_KERNEL);
529 else
530 res_alloc->allocated = kzalloc((dev->persist->
531 num_vfs + 1) *
532 sizeof(int), GFP_KERNEL);
533 /* Reduce the sink counter */
534 if (i == RES_COUNTER)
535 res_alloc->res_free = dev->caps.max_counters - 1;
536
537 if (!res_alloc->quota || !res_alloc->guaranteed ||
538 !res_alloc->allocated)
539 goto no_mem_err;
540
541 spin_lock_init(&res_alloc->alloc_lock);
542 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
543 struct mlx4_active_ports actv_ports =
544 mlx4_get_active_ports(dev, t);
545 switch (i) {
546 case RES_QP:
547 initialize_res_quotas(dev, res_alloc, RES_QP,
548 t, dev->caps.num_qps -
549 dev->caps.reserved_qps -
550 mlx4_num_reserved_sqps(dev));
551 break;
552 case RES_CQ:
553 initialize_res_quotas(dev, res_alloc, RES_CQ,
554 t, dev->caps.num_cqs -
555 dev->caps.reserved_cqs);
556 break;
557 case RES_SRQ:
558 initialize_res_quotas(dev, res_alloc, RES_SRQ,
559 t, dev->caps.num_srqs -
560 dev->caps.reserved_srqs);
561 break;
562 case RES_MPT:
563 initialize_res_quotas(dev, res_alloc, RES_MPT,
564 t, dev->caps.num_mpts -
565 dev->caps.reserved_mrws);
566 break;
567 case RES_MTT:
568 initialize_res_quotas(dev, res_alloc, RES_MTT,
569 t, dev->caps.num_mtts -
570 dev->caps.reserved_mtts);
571 break;
572 case RES_MAC:
573 if (t == mlx4_master_func_num(dev)) {
574 int max_vfs_pport = 0;
575 /* Find the max number of VFs */
576 /* attached to any single port. */
577 for (j = 0; j < dev->caps.num_ports;
578 j++) {
579 struct mlx4_slaves_pport slaves_pport =
580 mlx4_phys_to_slaves_pport(dev, j + 1);
581 unsigned current_slaves =
582 bitmap_weight(slaves_pport.slaves,
583 dev->caps.num_ports) - 1;
584 if (max_vfs_pport < current_slaves)
585 max_vfs_pport =
586 current_slaves;
587 }
588 res_alloc->quota[t] =
589 MLX4_MAX_MAC_NUM -
590 2 * max_vfs_pport;
591 res_alloc->guaranteed[t] = 2;
592 for (j = 0; j < MLX4_MAX_PORTS; j++)
593 res_alloc->res_port_free[j] =
594 MLX4_MAX_MAC_NUM;
595 } else {
596 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
597 res_alloc->guaranteed[t] = 2;
598 }
599 break;
600 case RES_VLAN:
601 if (t == mlx4_master_func_num(dev)) {
602 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
603 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
604 for (j = 0; j < MLX4_MAX_PORTS; j++)
605 res_alloc->res_port_free[j] =
606 res_alloc->quota[t];
607 } else {
608 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
609 res_alloc->guaranteed[t] = 0;
610 }
611 break;
612 case RES_COUNTER:
613 res_alloc->quota[t] = dev->caps.max_counters;
614 res_alloc->guaranteed[t] =
615 mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
616 res_alloc->res_free -= res_alloc->guaranteed[t];
617 break;
618 default:
619 break;
620 }
621 if (i == RES_MAC || i == RES_VLAN) {
622 for (j = 0; j < dev->caps.num_ports; j++)
623 if (test_bit(j, actv_ports.ports))
624 res_alloc->res_port_rsvd[j] +=
625 res_alloc->guaranteed[t];
626 } else {
627 res_alloc->res_reserved += res_alloc->guaranteed[t];
628 }
629 }
630 }
631 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
632 return 0;
633
634 no_mem_err:
635 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
636 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
637 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
638 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
639 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
640 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
641 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
642 }
643 return -ENOMEM;
644 }
645
646 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
647 enum mlx4_res_tracker_free_type type)
648 {
649 struct mlx4_priv *priv = mlx4_priv(dev);
650 int i;
651
652 if (priv->mfunc.master.res_tracker.slave_list) {
653 if (type != RES_TR_FREE_STRUCTS_ONLY) {
654 for (i = 0; i < dev->num_slaves; i++) {
655 if (type == RES_TR_FREE_ALL ||
656 dev->caps.function != i)
657 mlx4_delete_all_resources_for_slave(dev, i);
658 }
659 /* free master's vlans */
660 i = dev->caps.function;
661 mlx4_reset_roce_gids(dev, i);
662 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
663 rem_slave_vlans(dev, i);
664 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
665 }
666
667 if (type != RES_TR_FREE_SLAVES_ONLY) {
668 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
669 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
670 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
671 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
672 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
673 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
674 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
675 }
676 kfree(priv->mfunc.master.res_tracker.slave_list);
677 priv->mfunc.master.res_tracker.slave_list = NULL;
678 }
679 }
680 }
681
682 static void update_pkey_index(struct mlx4_dev *dev, int slave,
683 struct mlx4_cmd_mailbox *inbox)
684 {
685 u8 sched = *(u8 *)(inbox->buf + 64);
686 u8 orig_index = *(u8 *)(inbox->buf + 35);
687 u8 new_index;
688 struct mlx4_priv *priv = mlx4_priv(dev);
689 int port;
690
691 port = (sched >> 6 & 1) + 1;
692
693 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
694 *(u8 *)(inbox->buf + 35) = new_index;
695 }
696
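/*
 * Rewrite the GID index in a slave's QP context so it points into the
 * slave's own slice of the port GID table on Ethernet (RoCE) ports, or is
 * derived from the slave number on IB ports, for both the primary and the
 * alternate path.
 */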
697 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
698 u8 slave)
699 {
700 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
701 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
702 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
703 int port;
704
705 if (MLX4_QP_ST_UD == ts) {
706 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
707 if (mlx4_is_eth(dev, port))
708 qp_ctx->pri_path.mgid_index =
709 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
710 else
711 qp_ctx->pri_path.mgid_index = slave | 0x80;
712
713 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
714 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
715 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
716 if (mlx4_is_eth(dev, port)) {
717 qp_ctx->pri_path.mgid_index +=
718 mlx4_get_base_gid_ix(dev, slave, port);
719 qp_ctx->pri_path.mgid_index &= 0x7f;
720 } else {
721 qp_ctx->pri_path.mgid_index = slave & 0x7F;
722 }
723 }
724 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
725 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
726 if (mlx4_is_eth(dev, port)) {
727 qp_ctx->alt_path.mgid_index +=
728 mlx4_get_base_gid_ix(dev, slave, port);
729 qp_ctx->alt_path.mgid_index &= 0x7f;
730 } else {
731 qp_ctx->alt_path.mgid_index = slave & 0x7F;
732 }
733 }
734 }
735 }
736
737 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
738 u8 slave, int port);
739
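/*
 * Apply the vport (VF) administrative configuration to a slave's QP
 * context: attach a counter for the port, enforce the VST VLAN and QoS
 * settings when a default VLAN is configured, and force the source MAC
 * when spoof checking is enabled.
 */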
740 static int update_vport_qp_param(struct mlx4_dev *dev,
741 struct mlx4_cmd_mailbox *inbox,
742 u8 slave, u32 qpn)
743 {
744 struct mlx4_qp_context *qpc = inbox->buf + 8;
745 struct mlx4_vport_oper_state *vp_oper;
746 struct mlx4_priv *priv;
747 u32 qp_type;
748 int port, err = 0;
749
750 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
751 priv = mlx4_priv(dev);
752 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
753 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
754
755 err = handle_counter(dev, qpc, slave, port);
756 if (err)
757 goto out;
758
759 if (MLX4_VGT != vp_oper->state.default_vlan) {
760 /* the reserved QPs (special, proxy, tunnel)
761 * do not operate over vlans
762 */
763 if (mlx4_is_qp_reserved(dev, qpn))
764 return 0;
765
766 /* force vlan stripping by clearing the vsd bit; an MLX QP refers to Raw Ethernet */
767 if (qp_type == MLX4_QP_ST_UD ||
768 (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
769 if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
770 *(__be32 *)inbox->buf =
771 cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
772 MLX4_QP_OPTPAR_VLAN_STRIPPING);
773 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
774 } else {
775 struct mlx4_update_qp_params params = {.flags = 0};
776
777 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
778 if (err)
779 goto out;
780 }
781 }
782
783 /* preserve IF_COUNTER flag */
784 qpc->pri_path.vlan_control &=
785 MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
786 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
787 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
788 qpc->pri_path.vlan_control |=
789 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
790 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
791 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
792 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
793 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
794 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
795 } else if (0 != vp_oper->state.default_vlan) {
796 qpc->pri_path.vlan_control |=
797 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
798 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
799 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
800 } else { /* priority tagged */
801 qpc->pri_path.vlan_control |=
802 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
803 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
804 }
805
806 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
807 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
808 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
809 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
810 qpc->pri_path.sched_queue &= 0xC7;
811 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
812 qpc->qos_vport = vp_oper->state.qos_vport;
813 }
814 if (vp_oper->state.spoofchk) {
815 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
816 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
817 }
818 out:
819 return err;
820 }
821
822 static int mpt_mask(struct mlx4_dev *dev)
823 {
824 return dev->caps.num_mpts - 1;
825 }
826
827 static void *find_res(struct mlx4_dev *dev, u64 res_id,
828 enum mlx4_resource type)
829 {
830 struct mlx4_priv *priv = mlx4_priv(dev);
831
832 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
833 res_id);
834 }
835
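/*
 * Find a tracked resource, verify it is owned by 'slave' and not already
 * busy, then mark it busy (saving its previous state) before handing it
 * back to the caller.
 */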
836 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
837 enum mlx4_resource type,
838 void *res)
839 {
840 struct res_common *r;
841 int err = 0;
842
843 spin_lock_irq(mlx4_tlock(dev));
844 r = find_res(dev, res_id, type);
845 if (!r) {
846 err = -ENONET;
847 goto exit;
848 }
849
850 if (r->state == RES_ANY_BUSY) {
851 err = -EBUSY;
852 goto exit;
853 }
854
855 if (r->owner != slave) {
856 err = -EPERM;
857 goto exit;
858 }
859
860 r->from_state = r->state;
861 r->state = RES_ANY_BUSY;
862
863 if (res)
864 *((struct res_common **)res) = r;
865
866 exit:
867 spin_unlock_irq(mlx4_tlock(dev));
868 return err;
869 }
870
871 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
872 enum mlx4_resource type,
873 u64 res_id, int *slave)
874 {
875
876 struct res_common *r;
877 int err = -ENOENT;
878 int id = res_id;
879
880 if (type == RES_QP)
881 id &= 0x7fffff;
882 spin_lock(mlx4_tlock(dev));
883
884 r = find_res(dev, id, type);
885 if (r) {
886 *slave = r->owner;
887 err = 0;
888 }
889 spin_unlock(mlx4_tlock(dev));
890
891 return err;
892 }
893
894 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
895 enum mlx4_resource type)
896 {
897 struct res_common *r;
898
899 spin_lock_irq(mlx4_tlock(dev));
900 r = find_res(dev, res_id, type);
901 if (r)
902 r->state = r->from_state;
903 spin_unlock_irq(mlx4_tlock(dev));
904 }
905
906 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
907 u64 in_param, u64 *out_param, int port);
908
909 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
910 int counter_index)
911 {
912 struct res_common *r;
913 struct res_counter *counter;
914 int ret = 0;
915
916 if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
917 return ret;
918
919 spin_lock_irq(mlx4_tlock(dev));
920 r = find_res(dev, counter_index, RES_COUNTER);
921 if (!r || r->owner != slave) {
922 ret = -EINVAL;
923 } else {
924 counter = container_of(r, struct res_counter, com);
925 if (!counter->port)
926 counter->port = port;
927 }
928
929 spin_unlock_irq(mlx4_tlock(dev));
930 return ret;
931 }
932
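/*
 * No counter was specified in the QP context: reuse a counter already
 * owned by this slave for the port if one exists, otherwise try to
 * allocate a new one, falling back to the sink counter when none are
 * available.
 */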
933 static int handle_unexisting_counter(struct mlx4_dev *dev,
934 struct mlx4_qp_context *qpc, u8 slave,
935 int port)
936 {
937 struct mlx4_priv *priv = mlx4_priv(dev);
938 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
939 struct res_common *tmp;
940 struct res_counter *counter;
941 u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
942 int err = 0;
943
944 spin_lock_irq(mlx4_tlock(dev));
945 list_for_each_entry(tmp,
946 &tracker->slave_list[slave].res_list[RES_COUNTER],
947 list) {
948 counter = container_of(tmp, struct res_counter, com);
949 if (port == counter->port) {
950 qpc->pri_path.counter_index = counter->com.res_id;
951 spin_unlock_irq(mlx4_tlock(dev));
952 return 0;
953 }
954 }
955 spin_unlock_irq(mlx4_tlock(dev));
956
957 /* No existing counter, need to allocate a new counter */
958 err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
959 port);
960 if (err == -ENOENT) {
961 err = 0;
962 } else if (err && err != -ENOSPC) {
963 mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
964 __func__, slave, err);
965 } else {
966 qpc->pri_path.counter_index = counter_idx;
967 mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
968 __func__, slave, qpc->pri_path.counter_index);
969 err = 0;
970 }
971
972 return err;
973 }
974
975 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
976 u8 slave, int port)
977 {
978 if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
979 return handle_existing_counter(dev, slave, port,
980 qpc->pri_path.counter_index);
981
982 return handle_unexisting_counter(dev, qpc, slave, port);
983 }
984
985 static struct res_common *alloc_qp_tr(int id)
986 {
987 struct res_qp *ret;
988
989 ret = kzalloc(sizeof *ret, GFP_KERNEL);
990 if (!ret)
991 return NULL;
992
993 ret->com.res_id = id;
994 ret->com.state = RES_QP_RESERVED;
995 ret->local_qpn = id;
996 INIT_LIST_HEAD(&ret->mcg_list);
997 spin_lock_init(&ret->mcg_spl);
998 atomic_set(&ret->ref_count, 0);
999
1000 return &ret->com;
1001 }
1002
1003 static struct res_common *alloc_mtt_tr(int id, int order)
1004 {
1005 struct res_mtt *ret;
1006
1007 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1008 if (!ret)
1009 return NULL;
1010
1011 ret->com.res_id = id;
1012 ret->order = order;
1013 ret->com.state = RES_MTT_ALLOCATED;
1014 atomic_set(&ret->ref_count, 0);
1015
1016 return &ret->com;
1017 }
1018
1019 static struct res_common *alloc_mpt_tr(int id, int key)
1020 {
1021 struct res_mpt *ret;
1022
1023 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1024 if (!ret)
1025 return NULL;
1026
1027 ret->com.res_id = id;
1028 ret->com.state = RES_MPT_RESERVED;
1029 ret->key = key;
1030
1031 return &ret->com;
1032 }
1033
1034 static struct res_common *alloc_eq_tr(int id)
1035 {
1036 struct res_eq *ret;
1037
1038 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1039 if (!ret)
1040 return NULL;
1041
1042 ret->com.res_id = id;
1043 ret->com.state = RES_EQ_RESERVED;
1044
1045 return &ret->com;
1046 }
1047
1048 static struct res_common *alloc_cq_tr(int id)
1049 {
1050 struct res_cq *ret;
1051
1052 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1053 if (!ret)
1054 return NULL;
1055
1056 ret->com.res_id = id;
1057 ret->com.state = RES_CQ_ALLOCATED;
1058 atomic_set(&ret->ref_count, 0);
1059
1060 return &ret->com;
1061 }
1062
1063 static struct res_common *alloc_srq_tr(int id)
1064 {
1065 struct res_srq *ret;
1066
1067 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1068 if (!ret)
1069 return NULL;
1070
1071 ret->com.res_id = id;
1072 ret->com.state = RES_SRQ_ALLOCATED;
1073 atomic_set(&ret->ref_count, 0);
1074
1075 return &ret->com;
1076 }
1077
1078 static struct res_common *alloc_counter_tr(int id, int port)
1079 {
1080 struct res_counter *ret;
1081
1082 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1083 if (!ret)
1084 return NULL;
1085
1086 ret->com.res_id = id;
1087 ret->com.state = RES_COUNTER_ALLOCATED;
1088 ret->port = port;
1089
1090 return &ret->com;
1091 }
1092
1093 static struct res_common *alloc_xrcdn_tr(int id)
1094 {
1095 struct res_xrcdn *ret;
1096
1097 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1098 if (!ret)
1099 return NULL;
1100
1101 ret->com.res_id = id;
1102 ret->com.state = RES_XRCD_ALLOCATED;
1103
1104 return &ret->com;
1105 }
1106
1107 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1108 {
1109 struct res_fs_rule *ret;
1110
1111 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1112 if (!ret)
1113 return NULL;
1114
1115 ret->com.res_id = id;
1116 ret->com.state = RES_FS_RULE_ALLOCATED;
1117 ret->qpn = qpn;
1118 return &ret->com;
1119 }
1120
1121 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1122 int extra)
1123 {
1124 struct res_common *ret;
1125
1126 switch (type) {
1127 case RES_QP:
1128 ret = alloc_qp_tr(id);
1129 break;
1130 case RES_MPT:
1131 ret = alloc_mpt_tr(id, extra);
1132 break;
1133 case RES_MTT:
1134 ret = alloc_mtt_tr(id, extra);
1135 break;
1136 case RES_EQ:
1137 ret = alloc_eq_tr(id);
1138 break;
1139 case RES_CQ:
1140 ret = alloc_cq_tr(id);
1141 break;
1142 case RES_SRQ:
1143 ret = alloc_srq_tr(id);
1144 break;
1145 case RES_MAC:
1146 pr_err("implementation missing\n");
1147 return NULL;
1148 case RES_COUNTER:
1149 ret = alloc_counter_tr(id, extra);
1150 break;
1151 case RES_XRCD:
1152 ret = alloc_xrcdn_tr(id);
1153 break;
1154 case RES_FS_RULE:
1155 ret = alloc_fs_rule_tr(id, extra);
1156 break;
1157 default:
1158 return NULL;
1159 }
1160 if (ret)
1161 ret->owner = slave;
1162
1163 return ret;
1164 }
1165
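/*
 * Sum the statistics of all counters owned by 'slave' on 'port' into
 * 'data'. The counter IDs are snapshotted under the tracker lock, then
 * each counter is queried without holding it.
 */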
1166 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1167 struct mlx4_counter *data)
1168 {
1169 struct mlx4_priv *priv = mlx4_priv(dev);
1170 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1171 struct res_common *tmp;
1172 struct res_counter *counter;
1173 int *counters_arr;
1174 int i = 0, err = 0;
1175
1176 memset(data, 0, sizeof(*data));
1177
1178 counters_arr = kmalloc_array(dev->caps.max_counters,
1179 sizeof(*counters_arr), GFP_KERNEL);
1180 if (!counters_arr)
1181 return -ENOMEM;
1182
1183 spin_lock_irq(mlx4_tlock(dev));
1184 list_for_each_entry(tmp,
1185 &tracker->slave_list[slave].res_list[RES_COUNTER],
1186 list) {
1187 counter = container_of(tmp, struct res_counter, com);
1188 if (counter->port == port) {
1189 counters_arr[i] = (int)tmp->res_id;
1190 i++;
1191 }
1192 }
1193 spin_unlock_irq(mlx4_tlock(dev));
1194 counters_arr[i] = -1;
1195
1196 i = 0;
1197
1198 while (counters_arr[i] != -1) {
1199 err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1200 0);
1201 if (err) {
1202 memset(data, 0, sizeof(*data));
1203 goto table_changed;
1204 }
1205 i++;
1206 }
1207
1208 table_changed:
1209 kfree(counters_arr);
1210 return 0;
1211 }
1212
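/*
 * Create tracker entries for 'count' consecutive resource IDs starting at
 * 'base', insert them into the per-type tree and the slave's list, and
 * undo everything if any of the IDs is already tracked.
 */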
1213 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1214 enum mlx4_resource type, int extra)
1215 {
1216 int i;
1217 int err;
1218 struct mlx4_priv *priv = mlx4_priv(dev);
1219 struct res_common **res_arr;
1220 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1221 struct rb_root *root = &tracker->res_tree[type];
1222
1223 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1224 if (!res_arr)
1225 return -ENOMEM;
1226
1227 for (i = 0; i < count; ++i) {
1228 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1229 if (!res_arr[i]) {
1230 for (--i; i >= 0; --i)
1231 kfree(res_arr[i]);
1232
1233 kfree(res_arr);
1234 return -ENOMEM;
1235 }
1236 }
1237
1238 spin_lock_irq(mlx4_tlock(dev));
1239 for (i = 0; i < count; ++i) {
1240 if (find_res(dev, base + i, type)) {
1241 err = -EEXIST;
1242 goto undo;
1243 }
1244 err = res_tracker_insert(root, res_arr[i]);
1245 if (err)
1246 goto undo;
1247 list_add_tail(&res_arr[i]->list,
1248 &tracker->slave_list[slave].res_list[type]);
1249 }
1250 spin_unlock_irq(mlx4_tlock(dev));
1251 kfree(res_arr);
1252
1253 return 0;
1254
1255 undo:
1256 for (--i; i >= 0; --i) {
1257 rb_erase(&res_arr[i]->node, root);
1258 list_del_init(&res_arr[i]->list);
1259 }
1260
1261 spin_unlock_irq(mlx4_tlock(dev));
1262
1263 for (i = 0; i < count; ++i)
1264 kfree(res_arr[i]);
1265
1266 kfree(res_arr);
1267
1268 return err;
1269 }
1270
1271 static int remove_qp_ok(struct res_qp *res)
1272 {
1273 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1274 !list_empty(&res->mcg_list)) {
1275 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1276 res->com.state, atomic_read(&res->ref_count));
1277 return -EBUSY;
1278 } else if (res->com.state != RES_QP_RESERVED) {
1279 return -EPERM;
1280 }
1281
1282 return 0;
1283 }
1284
1285 static int remove_mtt_ok(struct res_mtt *res, int order)
1286 {
1287 if (res->com.state == RES_MTT_BUSY ||
1288 atomic_read(&res->ref_count)) {
1289 pr_devel("%s-%d: state %s, ref_count %d\n",
1290 __func__, __LINE__,
1291 mtt_states_str(res->com.state),
1292 atomic_read(&res->ref_count));
1293 return -EBUSY;
1294 } else if (res->com.state != RES_MTT_ALLOCATED)
1295 return -EPERM;
1296 else if (res->order != order)
1297 return -EINVAL;
1298
1299 return 0;
1300 }
1301
1302 static int remove_mpt_ok(struct res_mpt *res)
1303 {
1304 if (res->com.state == RES_MPT_BUSY)
1305 return -EBUSY;
1306 else if (res->com.state != RES_MPT_RESERVED)
1307 return -EPERM;
1308
1309 return 0;
1310 }
1311
1312 static int remove_eq_ok(struct res_eq *res)
1313 {
1314 if (res->com.state == RES_MPT_BUSY)
1315 return -EBUSY;
1316 else if (res->com.state != RES_MPT_RESERVED)
1317 return -EPERM;
1318
1319 return 0;
1320 }
1321
1322 static int remove_counter_ok(struct res_counter *res)
1323 {
1324 if (res->com.state == RES_COUNTER_BUSY)
1325 return -EBUSY;
1326 else if (res->com.state != RES_COUNTER_ALLOCATED)
1327 return -EPERM;
1328
1329 return 0;
1330 }
1331
1332 static int remove_xrcdn_ok(struct res_xrcdn *res)
1333 {
1334 if (res->com.state == RES_XRCD_BUSY)
1335 return -EBUSY;
1336 else if (res->com.state != RES_XRCD_ALLOCATED)
1337 return -EPERM;
1338
1339 return 0;
1340 }
1341
1342 static int remove_fs_rule_ok(struct res_fs_rule *res)
1343 {
1344 if (res->com.state == RES_FS_RULE_BUSY)
1345 return -EBUSY;
1346 else if (res->com.state != RES_FS_RULE_ALLOCATED)
1347 return -EPERM;
1348
1349 return 0;
1350 }
1351
1352 static int remove_cq_ok(struct res_cq *res)
1353 {
1354 if (res->com.state == RES_CQ_BUSY)
1355 return -EBUSY;
1356 else if (res->com.state != RES_CQ_ALLOCATED)
1357 return -EPERM;
1358
1359 return 0;
1360 }
1361
1362 static int remove_srq_ok(struct res_srq *res)
1363 {
1364 if (res->com.state == RES_SRQ_BUSY)
1365 return -EBUSY;
1366 else if (res->com.state != RES_SRQ_ALLOCATED)
1367 return -EPERM;
1368
1369 return 0;
1370 }
1371
1372 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1373 {
1374 switch (type) {
1375 case RES_QP:
1376 return remove_qp_ok((struct res_qp *)res);
1377 case RES_CQ:
1378 return remove_cq_ok((struct res_cq *)res);
1379 case RES_SRQ:
1380 return remove_srq_ok((struct res_srq *)res);
1381 case RES_MPT:
1382 return remove_mpt_ok((struct res_mpt *)res);
1383 case RES_MTT:
1384 return remove_mtt_ok((struct res_mtt *)res, extra);
1385 case RES_MAC:
1386 return -ENOSYS;
1387 case RES_EQ:
1388 return remove_eq_ok((struct res_eq *)res);
1389 case RES_COUNTER:
1390 return remove_counter_ok((struct res_counter *)res);
1391 case RES_XRCD:
1392 return remove_xrcdn_ok((struct res_xrcdn *)res);
1393 case RES_FS_RULE:
1394 return remove_fs_rule_ok((struct res_fs_rule *)res);
1395 default:
1396 return -EINVAL;
1397 }
1398 }
1399
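/*
 * Remove 'count' consecutive tracked resources, but only if every one of
 * them exists, is owned by 'slave' and is in a state that allows removal.
 */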
1400 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1401 enum mlx4_resource type, int extra)
1402 {
1403 u64 i;
1404 int err;
1405 struct mlx4_priv *priv = mlx4_priv(dev);
1406 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1407 struct res_common *r;
1408
1409 spin_lock_irq(mlx4_tlock(dev));
1410 for (i = base; i < base + count; ++i) {
1411 r = res_tracker_lookup(&tracker->res_tree[type], i);
1412 if (!r) {
1413 err = -ENOENT;
1414 goto out;
1415 }
1416 if (r->owner != slave) {
1417 err = -EPERM;
1418 goto out;
1419 }
1420 err = remove_ok(r, type, extra);
1421 if (err)
1422 goto out;
1423 }
1424
1425 for (i = base; i < base + count; ++i) {
1426 r = res_tracker_lookup(&tracker->res_tree[type], i);
1427 rb_erase(&r->node, &tracker->res_tree[type]);
1428 list_del(&r->list);
1429 kfree(r);
1430 }
1431 err = 0;
1432
1433 out:
1434 spin_unlock_irq(mlx4_tlock(dev));
1435
1436 return err;
1437 }
1438
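/*
 * Begin a QP state transition in the tracker: validate that moving from
 * the current state to 'state' is legal, then mark the entry busy and
 * record the from/to states so the transition can later be committed
 * (res_end_move) or rolled back (res_abort_move).
 */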
1439 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1440 enum res_qp_states state, struct res_qp **qp,
1441 int alloc)
1442 {
1443 struct mlx4_priv *priv = mlx4_priv(dev);
1444 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1445 struct res_qp *r;
1446 int err = 0;
1447
1448 spin_lock_irq(mlx4_tlock(dev));
1449 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1450 if (!r)
1451 err = -ENOENT;
1452 else if (r->com.owner != slave)
1453 err = -EPERM;
1454 else {
1455 switch (state) {
1456 case RES_QP_BUSY:
1457 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1458 __func__, r->com.res_id);
1459 err = -EBUSY;
1460 break;
1461
1462 case RES_QP_RESERVED:
1463 if (r->com.state == RES_QP_MAPPED && !alloc)
1464 break;
1465
1466 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1467 err = -EINVAL;
1468 break;
1469
1470 case RES_QP_MAPPED:
1471 if ((r->com.state == RES_QP_RESERVED && alloc) ||
1472 r->com.state == RES_QP_HW)
1473 break;
1474 else {
1475 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1476 r->com.res_id);
1477 err = -EINVAL;
1478 }
1479
1480 break;
1481
1482 case RES_QP_HW:
1483 if (r->com.state != RES_QP_MAPPED)
1484 err = -EINVAL;
1485 break;
1486 default:
1487 err = -EINVAL;
1488 }
1489
1490 if (!err) {
1491 r->com.from_state = r->com.state;
1492 r->com.to_state = state;
1493 r->com.state = RES_QP_BUSY;
1494 if (qp)
1495 *qp = r;
1496 }
1497 }
1498
1499 spin_unlock_irq(mlx4_tlock(dev));
1500
1501 return err;
1502 }
1503
1504 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1505 enum res_mpt_states state, struct res_mpt **mpt)
1506 {
1507 struct mlx4_priv *priv = mlx4_priv(dev);
1508 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1509 struct res_mpt *r;
1510 int err = 0;
1511
1512 spin_lock_irq(mlx4_tlock(dev));
1513 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1514 if (!r)
1515 err = -ENOENT;
1516 else if (r->com.owner != slave)
1517 err = -EPERM;
1518 else {
1519 switch (state) {
1520 case RES_MPT_BUSY:
1521 err = -EINVAL;
1522 break;
1523
1524 case RES_MPT_RESERVED:
1525 if (r->com.state != RES_MPT_MAPPED)
1526 err = -EINVAL;
1527 break;
1528
1529 case RES_MPT_MAPPED:
1530 if (r->com.state != RES_MPT_RESERVED &&
1531 r->com.state != RES_MPT_HW)
1532 err = -EINVAL;
1533 break;
1534
1535 case RES_MPT_HW:
1536 if (r->com.state != RES_MPT_MAPPED)
1537 err = -EINVAL;
1538 break;
1539 default:
1540 err = -EINVAL;
1541 }
1542
1543 if (!err) {
1544 r->com.from_state = r->com.state;
1545 r->com.to_state = state;
1546 r->com.state = RES_MPT_BUSY;
1547 if (mpt)
1548 *mpt = r;
1549 }
1550 }
1551
1552 spin_unlock_irq(mlx4_tlock(dev));
1553
1554 return err;
1555 }
1556
1557 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1558 enum res_eq_states state, struct res_eq **eq)
1559 {
1560 struct mlx4_priv *priv = mlx4_priv(dev);
1561 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1562 struct res_eq *r;
1563 int err = 0;
1564
1565 spin_lock_irq(mlx4_tlock(dev));
1566 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1567 if (!r)
1568 err = -ENOENT;
1569 else if (r->com.owner != slave)
1570 err = -EPERM;
1571 else {
1572 switch (state) {
1573 case RES_EQ_BUSY:
1574 err = -EINVAL;
1575 break;
1576
1577 case RES_EQ_RESERVED:
1578 if (r->com.state != RES_EQ_HW)
1579 err = -EINVAL;
1580 break;
1581
1582 case RES_EQ_HW:
1583 if (r->com.state != RES_EQ_RESERVED)
1584 err = -EINVAL;
1585 break;
1586
1587 default:
1588 err = -EINVAL;
1589 }
1590
1591 if (!err) {
1592 r->com.from_state = r->com.state;
1593 r->com.to_state = state;
1594 r->com.state = RES_EQ_BUSY;
1595 if (eq)
1596 *eq = r;
1597 }
1598 }
1599
1600 spin_unlock_irq(mlx4_tlock(dev));
1601
1602 return err;
1603 }
1604
1605 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1606 enum res_cq_states state, struct res_cq **cq)
1607 {
1608 struct mlx4_priv *priv = mlx4_priv(dev);
1609 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1610 struct res_cq *r;
1611 int err;
1612
1613 spin_lock_irq(mlx4_tlock(dev));
1614 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1615 if (!r) {
1616 err = -ENOENT;
1617 } else if (r->com.owner != slave) {
1618 err = -EPERM;
1619 } else if (state == RES_CQ_ALLOCATED) {
1620 if (r->com.state != RES_CQ_HW)
1621 err = -EINVAL;
1622 else if (atomic_read(&r->ref_count))
1623 err = -EBUSY;
1624 else
1625 err = 0;
1626 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1627 err = -EINVAL;
1628 } else {
1629 err = 0;
1630 }
1631
1632 if (!err) {
1633 r->com.from_state = r->com.state;
1634 r->com.to_state = state;
1635 r->com.state = RES_CQ_BUSY;
1636 if (cq)
1637 *cq = r;
1638 }
1639
1640 spin_unlock_irq(mlx4_tlock(dev));
1641
1642 return err;
1643 }
1644
1645 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1646 enum res_srq_states state, struct res_srq **srq)
1647 {
1648 struct mlx4_priv *priv = mlx4_priv(dev);
1649 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1650 struct res_srq *r;
1651 int err = 0;
1652
1653 spin_lock_irq(mlx4_tlock(dev));
1654 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1655 if (!r) {
1656 err = -ENOENT;
1657 } else if (r->com.owner != slave) {
1658 err = -EPERM;
1659 } else if (state == RES_SRQ_ALLOCATED) {
1660 if (r->com.state != RES_SRQ_HW)
1661 err = -EINVAL;
1662 else if (atomic_read(&r->ref_count))
1663 err = -EBUSY;
1664 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1665 err = -EINVAL;
1666 }
1667
1668 if (!err) {
1669 r->com.from_state = r->com.state;
1670 r->com.to_state = state;
1671 r->com.state = RES_SRQ_BUSY;
1672 if (srq)
1673 *srq = r;
1674 }
1675
1676 spin_unlock_irq(mlx4_tlock(dev));
1677
1678 return err;
1679 }
1680
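/*
 * Roll back (res_abort_move) or commit (res_end_move) a state transition
 * started by one of the *_res_start_move_to helpers.
 */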
1681 static void res_abort_move(struct mlx4_dev *dev, int slave,
1682 enum mlx4_resource type, int id)
1683 {
1684 struct mlx4_priv *priv = mlx4_priv(dev);
1685 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1686 struct res_common *r;
1687
1688 spin_lock_irq(mlx4_tlock(dev));
1689 r = res_tracker_lookup(&tracker->res_tree[type], id);
1690 if (r && (r->owner == slave))
1691 r->state = r->from_state;
1692 spin_unlock_irq(mlx4_tlock(dev));
1693 }
1694
1695 static void res_end_move(struct mlx4_dev *dev, int slave,
1696 enum mlx4_resource type, int id)
1697 {
1698 struct mlx4_priv *priv = mlx4_priv(dev);
1699 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1700 struct res_common *r;
1701
1702 spin_lock_irq(mlx4_tlock(dev));
1703 r = res_tracker_lookup(&tracker->res_tree[type], id);
1704 if (r && (r->owner == slave))
1705 r->state = r->to_state;
1706 spin_unlock_irq(mlx4_tlock(dev));
1707 }
1708
1709 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1710 {
1711 return mlx4_is_qp_reserved(dev, qpn) &&
1712 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1713 }
1714
1715 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1716 {
1717 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1718 }
1719
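/*
 * Allocate QP resources on behalf of a slave: RES_OP_RESERVE reserves a
 * range of QP numbers against the slave's quota, RES_OP_MAP_ICM moves a
 * single QP to the MAPPED state, allocating its ICM unless the QP is
 * firmware-reserved.
 */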
1720 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1721 u64 in_param, u64 *out_param)
1722 {
1723 int err;
1724 int count;
1725 int align;
1726 int base;
1727 int qpn;
1728 u8 flags;
1729
1730 switch (op) {
1731 case RES_OP_RESERVE:
1732 count = get_param_l(&in_param) & 0xffffff;
1733 /* Turn off all unsupported QP allocation flags that the
1734 * slave tries to set.
1735 */
1736 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1737 align = get_param_h(&in_param);
1738 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1739 if (err)
1740 return err;
1741
1742 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1743 if (err) {
1744 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1745 return err;
1746 }
1747
1748 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1749 if (err) {
1750 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1751 __mlx4_qp_release_range(dev, base, count);
1752 return err;
1753 }
1754 set_param_l(out_param, base);
1755 break;
1756 case RES_OP_MAP_ICM:
1757 qpn = get_param_l(&in_param) & 0x7fffff;
1758 if (valid_reserved(dev, slave, qpn)) {
1759 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1760 if (err)
1761 return err;
1762 }
1763
1764 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1765 NULL, 1);
1766 if (err)
1767 return err;
1768
1769 if (!fw_reserved(dev, qpn)) {
1770 err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1771 if (err) {
1772 res_abort_move(dev, slave, RES_QP, qpn);
1773 return err;
1774 }
1775 }
1776
1777 res_end_move(dev, slave, RES_QP, qpn);
1778 break;
1779
1780 default:
1781 err = -EINVAL;
1782 break;
1783 }
1784 return err;
1785 }
1786
1787 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1788 u64 in_param, u64 *out_param)
1789 {
1790 int err = -EINVAL;
1791 int base;
1792 int order;
1793
1794 if (op != RES_OP_RESERVE_AND_MAP)
1795 return err;
1796
1797 order = get_param_l(&in_param);
1798
1799 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1800 if (err)
1801 return err;
1802
1803 base = __mlx4_alloc_mtt_range(dev, order);
1804 if (base == -1) {
1805 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1806 return -ENOMEM;
1807 }
1808
1809 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1810 if (err) {
1811 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1812 __mlx4_free_mtt_range(dev, base, order);
1813 } else {
1814 set_param_l(out_param, base);
1815 }
1816
1817 return err;
1818 }
1819
1820 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1821 u64 in_param, u64 *out_param)
1822 {
1823 int err = -EINVAL;
1824 int index;
1825 int id;
1826 struct res_mpt *mpt;
1827
1828 switch (op) {
1829 case RES_OP_RESERVE:
1830 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1831 if (err)
1832 break;
1833
1834 index = __mlx4_mpt_reserve(dev);
1835 if (index == -1) {
1836 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1837 break;
1838 }
1839 id = index & mpt_mask(dev);
1840
1841 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1842 if (err) {
1843 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1844 __mlx4_mpt_release(dev, index);
1845 break;
1846 }
1847 set_param_l(out_param, index);
1848 break;
1849 case RES_OP_MAP_ICM:
1850 index = get_param_l(&in_param);
1851 id = index & mpt_mask(dev);
1852 err = mr_res_start_move_to(dev, slave, id,
1853 RES_MPT_MAPPED, &mpt);
1854 if (err)
1855 return err;
1856
1857 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1858 if (err) {
1859 res_abort_move(dev, slave, RES_MPT, id);
1860 return err;
1861 }
1862
1863 res_end_move(dev, slave, RES_MPT, id);
1864 break;
1865 }
1866 return err;
1867 }
1868
1869 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1870 u64 in_param, u64 *out_param)
1871 {
1872 int cqn;
1873 int err;
1874
1875 switch (op) {
1876 case RES_OP_RESERVE_AND_MAP:
1877 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1878 if (err)
1879 break;
1880
1881 err = __mlx4_cq_alloc_icm(dev, &cqn);
1882 if (err) {
1883 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1884 break;
1885 }
1886
1887 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1888 if (err) {
1889 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1890 __mlx4_cq_free_icm(dev, cqn);
1891 break;
1892 }
1893
1894 set_param_l(out_param, cqn);
1895 break;
1896
1897 default:
1898 err = -EINVAL;
1899 }
1900
1901 return err;
1902 }
1903
1904 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1905 u64 in_param, u64 *out_param)
1906 {
1907 int srqn;
1908 int err;
1909
1910 switch (op) {
1911 case RES_OP_RESERVE_AND_MAP:
1912 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1913 if (err)
1914 break;
1915
1916 err = __mlx4_srq_alloc_icm(dev, &srqn);
1917 if (err) {
1918 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1919 break;
1920 }
1921
1922 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1923 if (err) {
1924 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1925 __mlx4_srq_free_icm(dev, srqn);
1926 break;
1927 }
1928
1929 set_param_l(out_param, srqn);
1930 break;
1931
1932 default:
1933 err = -EINVAL;
1934 }
1935
1936 return err;
1937 }
1938
1939 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1940 u8 smac_index, u64 *mac)
1941 {
1942 struct mlx4_priv *priv = mlx4_priv(dev);
1943 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1944 struct list_head *mac_list =
1945 &tracker->slave_list[slave].res_list[RES_MAC];
1946 struct mac_res *res, *tmp;
1947
1948 list_for_each_entry_safe(res, tmp, mac_list, list) {
1949 if (res->smac_index == smac_index && res->port == (u8) port) {
1950 *mac = res->mac;
1951 return 0;
1952 }
1953 }
1954 return -ENOENT;
1955 }
1956
1957 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1958 {
1959 struct mlx4_priv *priv = mlx4_priv(dev);
1960 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1961 struct list_head *mac_list =
1962 &tracker->slave_list[slave].res_list[RES_MAC];
1963 struct mac_res *res, *tmp;
1964
1965 list_for_each_entry_safe(res, tmp, mac_list, list) {
1966 if (res->mac == mac && res->port == (u8) port) {
1967 /* mac found. update ref count */
1968 ++res->ref_count;
1969 return 0;
1970 }
1971 }
1972
1973 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1974 return -EINVAL;
1975 res = kzalloc(sizeof *res, GFP_KERNEL);
1976 if (!res) {
1977 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1978 return -ENOMEM;
1979 }
1980 res->mac = mac;
1981 res->port = (u8) port;
1982 res->smac_index = smac_index;
1983 res->ref_count = 1;
1984 list_add_tail(&res->list,
1985 &tracker->slave_list[slave].res_list[RES_MAC]);
1986 return 0;
1987 }
1988
1989 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1990 int port)
1991 {
1992 struct mlx4_priv *priv = mlx4_priv(dev);
1993 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1994 struct list_head *mac_list =
1995 &tracker->slave_list[slave].res_list[RES_MAC];
1996 struct mac_res *res, *tmp;
1997
1998 list_for_each_entry_safe(res, tmp, mac_list, list) {
1999 if (res->mac == mac && res->port == (u8) port) {
2000 if (!--res->ref_count) {
2001 list_del(&res->list);
2002 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2003 kfree(res);
2004 }
2005 break;
2006 }
2007 }
2008 }
2009
2010 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2011 {
2012 struct mlx4_priv *priv = mlx4_priv(dev);
2013 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2014 struct list_head *mac_list =
2015 &tracker->slave_list[slave].res_list[RES_MAC];
2016 struct mac_res *res, *tmp;
2017 int i;
2018
2019 list_for_each_entry_safe(res, tmp, mac_list, list) {
2020 list_del(&res->list);
2021 /* unregister the MAC once for each reference the slave held */
2022 for (i = 0; i < res->ref_count; i++)
2023 __mlx4_unregister_mac(dev, res->port, res->mac);
2024 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2025 kfree(res);
2026 }
2027 }
2028
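/*
 * ALLOC_RES(RES_MAC) handler. The port comes either from the in_modifier
 * (in_port) or from the low dword of out_param, and is translated to the
 * physical port for this slave. On success __mlx4_register_mac() returns the
 * allocated SMAC index, which is passed back to the VF in out_param.
 */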
2029 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2030 u64 in_param, u64 *out_param, int in_port)
2031 {
2032 int err = -EINVAL;
2033 int port;
2034 u64 mac;
2035 u8 smac_index;
2036
2037 if (op != RES_OP_RESERVE_AND_MAP)
2038 return err;
2039
2040 port = !in_port ? get_param_l(out_param) : in_port;
2041 port = mlx4_slave_convert_port(
2042 dev, slave, port);
2043
2044 if (port < 0)
2045 return -EINVAL;
2046 mac = in_param;
2047
2048 err = __mlx4_register_mac(dev, port, mac);
2049 if (err >= 0) {
2050 smac_index = err;
2051 set_param_l(out_param, err);
2052 err = 0;
2053 }
2054
2055 if (!err) {
2056 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2057 if (err)
2058 __mlx4_unregister_mac(dev, port, mac);
2059 }
2060 return err;
2061 }
2062
2063 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2064 int port, int vlan_index)
2065 {
2066 struct mlx4_priv *priv = mlx4_priv(dev);
2067 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2068 struct list_head *vlan_list =
2069 &tracker->slave_list[slave].res_list[RES_VLAN];
2070 struct vlan_res *res, *tmp;
2071
2072 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2073 if (res->vlan == vlan && res->port == (u8) port) {
2074 /* vlan found. update ref count */
2075 ++res->ref_count;
2076 return 0;
2077 }
2078 }
2079
2080 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2081 return -EINVAL;
2082 res = kzalloc(sizeof(*res), GFP_KERNEL);
2083 if (!res) {
2084 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2085 return -ENOMEM;
2086 }
2087 res->vlan = vlan;
2088 res->port = (u8) port;
2089 res->vlan_index = vlan_index;
2090 res->ref_count = 1;
2091 list_add_tail(&res->list,
2092 &tracker->slave_list[slave].res_list[RES_VLAN]);
2093 return 0;
2094 }
2095
2096
2097 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2098 int port)
2099 {
2100 struct mlx4_priv *priv = mlx4_priv(dev);
2101 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2102 struct list_head *vlan_list =
2103 &tracker->slave_list[slave].res_list[RES_VLAN];
2104 struct vlan_res *res, *tmp;
2105
2106 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2107 if (res->vlan == vlan && res->port == (u8) port) {
2108 if (!--res->ref_count) {
2109 list_del(&res->list);
2110 mlx4_release_resource(dev, slave, RES_VLAN,
2111 1, port);
2112 kfree(res);
2113 }
2114 break;
2115 }
2116 }
2117 }
2118
2119 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2120 {
2121 struct mlx4_priv *priv = mlx4_priv(dev);
2122 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2123 struct list_head *vlan_list =
2124 &tracker->slave_list[slave].res_list[RES_VLAN];
2125 struct vlan_res *res, *tmp;
2126 int i;
2127
2128 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2129 list_del(&res->list);
2130 /* unregister the VLAN once for each reference the slave held */
2131 for (i = 0; i < res->ref_count; i++)
2132 __mlx4_unregister_vlan(dev, res->port, res->vlan);
2133 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2134 kfree(res);
2135 }
2136 }
2137
2138 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2139 u64 in_param, u64 *out_param, int in_port)
2140 {
2141 struct mlx4_priv *priv = mlx4_priv(dev);
2142 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2143 int err;
2144 u16 vlan;
2145 int vlan_index;
2146 int port;
2147
2148 port = !in_port ? get_param_l(out_param) : in_port;
2149
2150 if (!port || op != RES_OP_RESERVE_AND_MAP)
2151 return -EINVAL;
2152
2153 port = mlx4_slave_convert_port(
2154 dev, slave, port);
2155
2156 if (port < 0)
2157 return -EINVAL;
2158 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2159 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2160 slave_state[slave].old_vlan_api = true;
2161 return 0;
2162 }
2163
2164 vlan = (u16) in_param;
2165
2166 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2167 if (!err) {
2168 set_param_l(out_param, (u32) vlan_index);
2169 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2170 if (err)
2171 __mlx4_unregister_vlan(dev, port, vlan);
2172 }
2173 return err;
2174 }
2175
2176 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2177 u64 in_param, u64 *out_param, int port)
2178 {
2179 u32 index;
2180 int err;
2181
2182 if (op != RES_OP_RESERVE)
2183 return -EINVAL;
2184
2185 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2186 if (err)
2187 return err;
2188
2189 err = __mlx4_counter_alloc(dev, &index);
2190 if (err) {
2191 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2192 return err;
2193 }
2194
2195 err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2196 if (err) {
2197 __mlx4_counter_free(dev, index);
2198 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2199 } else {
2200 set_param_l(out_param, index);
2201 }
2202
2203 return err;
2204 }
2205
2206 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2207 u64 in_param, u64 *out_param)
2208 {
2209 u32 xrcdn;
2210 int err;
2211
2212 if (op != RES_OP_RESERVE)
2213 return -EINVAL;
2214
2215 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2216 if (err)
2217 return err;
2218
2219 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2220 if (err)
2221 __mlx4_xrcd_free(dev, xrcdn);
2222 else
2223 set_param_l(out_param, xrcdn);
2224
2225 return err;
2226 }
2227
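/*
 * Dispatcher for the ALLOC_RES command issued by slaves. The low byte of
 * in_modifier selects the resource type, bits 8..15 carry the port for
 * MAC/VLAN requests, and op_modifier carries the allocation op (reserve,
 * map ICM, or both). For illustration only (sketch, not taken from this
 * file), a VF-side MAC registration reaches this wrapper through the
 * command interface roughly as:
 *
 *	err = mlx4_cmd_imm(dev, mac, &out_param,
 *			   ((u32) port << 8) | RES_MAC, RES_OP_RESERVE_AND_MAP,
 *			   MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
 *			   MLX4_CMD_WRAPPED);
 */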
2228 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2229 struct mlx4_vhcr *vhcr,
2230 struct mlx4_cmd_mailbox *inbox,
2231 struct mlx4_cmd_mailbox *outbox,
2232 struct mlx4_cmd_info *cmd)
2233 {
2234 int err;
2235 int alop = vhcr->op_modifier;
2236
2237 switch (vhcr->in_modifier & 0xFF) {
2238 case RES_QP:
2239 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2240 vhcr->in_param, &vhcr->out_param);
2241 break;
2242
2243 case RES_MTT:
2244 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2245 vhcr->in_param, &vhcr->out_param);
2246 break;
2247
2248 case RES_MPT:
2249 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2250 vhcr->in_param, &vhcr->out_param);
2251 break;
2252
2253 case RES_CQ:
2254 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2255 vhcr->in_param, &vhcr->out_param);
2256 break;
2257
2258 case RES_SRQ:
2259 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2260 vhcr->in_param, &vhcr->out_param);
2261 break;
2262
2263 case RES_MAC:
2264 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2265 vhcr->in_param, &vhcr->out_param,
2266 (vhcr->in_modifier >> 8) & 0xFF);
2267 break;
2268
2269 case RES_VLAN:
2270 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2271 vhcr->in_param, &vhcr->out_param,
2272 (vhcr->in_modifier >> 8) & 0xFF);
2273 break;
2274
2275 case RES_COUNTER:
2276 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2277 vhcr->in_param, &vhcr->out_param, 0);
2278 break;
2279
2280 case RES_XRCD:
2281 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2282 vhcr->in_param, &vhcr->out_param);
2283 break;
2284
2285 default:
2286 err = -EINVAL;
2287 break;
2288 }
2289
2290 return err;
2291 }
2292
2293 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2294 u64 in_param)
2295 {
2296 int err;
2297 int count;
2298 int base;
2299 int qpn;
2300
2301 switch (op) {
2302 case RES_OP_RESERVE:
2303 base = get_param_l(&in_param) & 0x7fffff;
2304 count = get_param_h(&in_param);
2305 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2306 if (err)
2307 break;
2308 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2309 __mlx4_qp_release_range(dev, base, count);
2310 break;
2311 case RES_OP_MAP_ICM:
2312 qpn = get_param_l(&in_param) & 0x7fffff;
2313 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2314 NULL, 0);
2315 if (err)
2316 return err;
2317
2318 if (!fw_reserved(dev, qpn))
2319 __mlx4_qp_free_icm(dev, qpn);
2320
2321 res_end_move(dev, slave, RES_QP, qpn);
2322
2323 if (valid_reserved(dev, slave, qpn))
2324 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2325 break;
2326 default:
2327 err = -EINVAL;
2328 break;
2329 }
2330 return err;
2331 }
2332
2333 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2334 u64 in_param, u64 *out_param)
2335 {
2336 int err = -EINVAL;
2337 int base;
2338 int order;
2339
2340 if (op != RES_OP_RESERVE_AND_MAP)
2341 return err;
2342
2343 base = get_param_l(&in_param);
2344 order = get_param_h(&in_param);
2345 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2346 if (!err) {
2347 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2348 __mlx4_free_mtt_range(dev, base, order);
2349 }
2350 return err;
2351 }
2352
2353 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2354 u64 in_param)
2355 {
2356 int err = -EINVAL;
2357 int index;
2358 int id;
2359 struct res_mpt *mpt;
2360
2361 switch (op) {
2362 case RES_OP_RESERVE:
2363 index = get_param_l(&in_param);
2364 id = index & mpt_mask(dev);
2365 err = get_res(dev, slave, id, RES_MPT, &mpt);
2366 if (err)
2367 break;
2368 index = mpt->key;
2369 put_res(dev, slave, id, RES_MPT);
2370
2371 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2372 if (err)
2373 break;
2374 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2375 __mlx4_mpt_release(dev, index);
2376 break;
2377 case RES_OP_MAP_ICM:
2378 index = get_param_l(&in_param);
2379 id = index & mpt_mask(dev);
2380 err = mr_res_start_move_to(dev, slave, id,
2381 RES_MPT_RESERVED, &mpt);
2382 if (err)
2383 return err;
2384
2385 __mlx4_mpt_free_icm(dev, mpt->key);
2386 res_end_move(dev, slave, RES_MPT, id);
2387 return err;
2388 break;
2389 default:
2390 err = -EINVAL;
2391 break;
2392 }
2393 return err;
2394 }
2395
2396 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2397 u64 in_param, u64 *out_param)
2398 {
2399 int cqn;
2400 int err;
2401
2402 switch (op) {
2403 case RES_OP_RESERVE_AND_MAP:
2404 cqn = get_param_l(&in_param);
2405 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2406 if (err)
2407 break;
2408
2409 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2410 __mlx4_cq_free_icm(dev, cqn);
2411 break;
2412
2413 default:
2414 err = -EINVAL;
2415 break;
2416 }
2417
2418 return err;
2419 }
2420
2421 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2422 u64 in_param, u64 *out_param)
2423 {
2424 int srqn;
2425 int err;
2426
2427 switch (op) {
2428 case RES_OP_RESERVE_AND_MAP:
2429 srqn = get_param_l(&in_param);
2430 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2431 if (err)
2432 break;
2433
2434 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2435 __mlx4_srq_free_icm(dev, srqn);
2436 break;
2437
2438 default:
2439 err = -EINVAL;
2440 break;
2441 }
2442
2443 return err;
2444 }
2445
2446 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2447 u64 in_param, u64 *out_param, int in_port)
2448 {
2449 int port;
2450 int err = 0;
2451
2452 switch (op) {
2453 case RES_OP_RESERVE_AND_MAP:
2454 port = !in_port ? get_param_l(out_param) : in_port;
2455 port = mlx4_slave_convert_port(
2456 dev, slave, port);
2457
2458 if (port < 0)
2459 return -EINVAL;
2460 mac_del_from_slave(dev, slave, in_param, port);
2461 __mlx4_unregister_mac(dev, port, in_param);
2462 break;
2463 default:
2464 err = -EINVAL;
2465 break;
2466 }
2467
2468 return err;
2469
2470 }
2471
2472 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2473 u64 in_param, u64 *out_param, int port)
2474 {
2475 struct mlx4_priv *priv = mlx4_priv(dev);
2476 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2477 int err = 0;
2478
2479 port = mlx4_slave_convert_port(
2480 dev, slave, port);
2481
2482 if (port < 0)
2483 return -EINVAL;
2484 switch (op) {
2485 case RES_OP_RESERVE_AND_MAP:
2486 if (slave_state[slave].old_vlan_api)
2487 return 0;
2488 if (!port)
2489 return -EINVAL;
2490 vlan_del_from_slave(dev, slave, in_param, port);
2491 __mlx4_unregister_vlan(dev, port, in_param);
2492 break;
2493 default:
2494 err = -EINVAL;
2495 break;
2496 }
2497
2498 return err;
2499 }
2500
2501 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2502 u64 in_param, u64 *out_param)
2503 {
2504 int index;
2505 int err;
2506
2507 if (op != RES_OP_RESERVE)
2508 return -EINVAL;
2509
2510 index = get_param_l(&in_param);
2511 if (index == MLX4_SINK_COUNTER_INDEX(dev))
2512 return 0;
2513
2514 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2515 if (err)
2516 return err;
2517
2518 __mlx4_counter_free(dev, index);
2519 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2520
2521 return err;
2522 }
2523
2524 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2525 u64 in_param, u64 *out_param)
2526 {
2527 int xrcdn;
2528 int err;
2529
2530 if (op != RES_OP_RESERVE)
2531 return -EINVAL;
2532
2533 xrcdn = get_param_l(&in_param);
2534 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2535 if (err)
2536 return err;
2537
2538 __mlx4_xrcd_free(dev, xrcdn);
2539
2540 return err;
2541 }
2542
2543 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2544 struct mlx4_vhcr *vhcr,
2545 struct mlx4_cmd_mailbox *inbox,
2546 struct mlx4_cmd_mailbox *outbox,
2547 struct mlx4_cmd_info *cmd)
2548 {
2549 int err = -EINVAL;
2550 int alop = vhcr->op_modifier;
2551
2552 switch (vhcr->in_modifier & 0xFF) {
2553 case RES_QP:
2554 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2555 vhcr->in_param);
2556 break;
2557
2558 case RES_MTT:
2559 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2560 vhcr->in_param, &vhcr->out_param);
2561 break;
2562
2563 case RES_MPT:
2564 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2565 vhcr->in_param);
2566 break;
2567
2568 case RES_CQ:
2569 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2570 vhcr->in_param, &vhcr->out_param);
2571 break;
2572
2573 case RES_SRQ:
2574 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2575 vhcr->in_param, &vhcr->out_param);
2576 break;
2577
2578 case RES_MAC:
2579 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2580 vhcr->in_param, &vhcr->out_param,
2581 (vhcr->in_modifier >> 8) & 0xFF);
2582 break;
2583
2584 case RES_VLAN:
2585 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2586 vhcr->in_param, &vhcr->out_param,
2587 (vhcr->in_modifier >> 8) & 0xFF);
2588 break;
2589
2590 case RES_COUNTER:
2591 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2592 vhcr->in_param, &vhcr->out_param);
2593 break;
2594
2595 case RES_XRCD:
2596 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2597 vhcr->in_param, &vhcr->out_param);
2598 break;
2599 default:
2600 break;
2601 }
2602 return err;
2603 }
2604
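/*
 * The helpers below pull individual fields out of the big-endian hardware
 * context structures that arrive in the command mailbox, so the wrappers can
 * validate what a slave is asking the device to do.
 */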
2605 /* ugly but other choices are uglier */
2606 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2607 {
2608 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2609 }
2610
2611 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2612 {
2613 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2614 }
2615
2616 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2617 {
2618 return be32_to_cpu(mpt->mtt_sz);
2619 }
2620
2621 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2622 {
2623 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2624 }
2625
2626 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2627 {
2628 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2629 }
2630
2631 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2632 {
2633 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2634 }
2635
2636 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2637 {
2638 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2639 }
2640
2641 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2642 {
2643 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2644 }
2645
2646 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2647 {
2648 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2649 }
2650
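/*
 * Number of MTT entries a QP needs, derived from the work-queue sizes in the
 * QPC and rounded up to a power of two. Worked example: with log_sq_size = 6,
 * a log stride field of 6, no RQ (SRQ/RSS/XRC) and 4KB pages,
 * sq_size = 1 << (6 + 6 + 4) = 64KB, i.e. 16 MTT entries.
 */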
2651 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2652 {
2653 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2654 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2655 int log_sq_stride = qpc->sq_size_stride & 7;
2656 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2657 int log_rq_stride = qpc->rq_size_stride & 7;
2658 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2659 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2660 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2661 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2662 int sq_size;
2663 int rq_size;
2664 int total_pages;
2665 int total_mem;
2666 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2667 int tot;
2668
2669 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2670 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2671 total_mem = sq_size + rq_size;
2672 tot = (total_mem + (page_offset << 6)) >> page_shift;
2673 total_pages = !tot ? 1 : roundup_pow_of_two(tot);
2674
2675 return total_pages;
2676 }
2677
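/*
 * A slave may only reference MTT entries that lie entirely inside an MTT
 * range it owns; [start, start + size) must fall within the tracked range.
 */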
2678 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2679 int size, struct res_mtt *mtt)
2680 {
2681 int res_start = mtt->com.res_id;
2682 int res_size = (1 << mtt->order);
2683
2684 if (start < res_start || start + size > res_start + res_size)
2685 return -EPERM;
2686 return 0;
2687 }
2688
2689 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2690 struct mlx4_vhcr *vhcr,
2691 struct mlx4_cmd_mailbox *inbox,
2692 struct mlx4_cmd_mailbox *outbox,
2693 struct mlx4_cmd_info *cmd)
2694 {
2695 int err;
2696 int index = vhcr->in_modifier;
2697 struct res_mtt *mtt;
2698 struct res_mpt *mpt;
2699 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2700 int phys;
2701 int id;
2702 u32 pd;
2703 int pd_slave;
2704
2705 id = index & mpt_mask(dev);
2706 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2707 if (err)
2708 return err;
2709
2710 /* Disable memory windows for VFs. */
2711 if (!mr_is_region(inbox->buf)) {
2712 err = -EPERM;
2713 goto ex_abort;
2714 }
2715
2716 /* The PD is stamped with the allocating function; it must be unset or match this slave. */
2717 pd = mr_get_pd(inbox->buf);
2718 pd_slave = (pd >> 17) & 0x7f;
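/* The PD allocator stamps (function number + 1) into bits 17-23 of PDs it
 * hands to functions, hence the decrement before comparing to the slave id.
 */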
2719 if (pd_slave != 0 && --pd_slave != slave) {
2720 err = -EPERM;
2721 goto ex_abort;
2722 }
2723
2724 if (mr_is_fmr(inbox->buf)) {
2725 /* FMR and Bind Enable are forbidden in slave devices. */
2726 if (mr_is_bind_enabled(inbox->buf)) {
2727 err = -EPERM;
2728 goto ex_abort;
2729 }
2730 /* FMR and Memory Windows are also forbidden. */
2731 if (!mr_is_region(inbox->buf)) {
2732 err = -EPERM;
2733 goto ex_abort;
2734 }
2735 }
2736
2737 phys = mr_phys_mpt(inbox->buf);
2738 if (!phys) {
2739 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2740 if (err)
2741 goto ex_abort;
2742
2743 err = check_mtt_range(dev, slave, mtt_base,
2744 mr_get_mtt_size(inbox->buf), mtt);
2745 if (err)
2746 goto ex_put;
2747
2748 mpt->mtt = mtt;
2749 }
2750
2751 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2752 if (err)
2753 goto ex_put;
2754
2755 if (!phys) {
2756 atomic_inc(&mtt->ref_count);
2757 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2758 }
2759
2760 res_end_move(dev, slave, RES_MPT, id);
2761 return 0;
2762
2763 ex_put:
2764 if (!phys)
2765 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2766 ex_abort:
2767 res_abort_move(dev, slave, RES_MPT, id);
2768
2769 return err;
2770 }
2771
2772 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2773 struct mlx4_vhcr *vhcr,
2774 struct mlx4_cmd_mailbox *inbox,
2775 struct mlx4_cmd_mailbox *outbox,
2776 struct mlx4_cmd_info *cmd)
2777 {
2778 int err;
2779 int index = vhcr->in_modifier;
2780 struct res_mpt *mpt;
2781 int id;
2782
2783 id = index & mpt_mask(dev);
2784 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2785 if (err)
2786 return err;
2787
2788 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2789 if (err)
2790 goto ex_abort;
2791
2792 if (mpt->mtt)
2793 atomic_dec(&mpt->mtt->ref_count);
2794
2795 res_end_move(dev, slave, RES_MPT, id);
2796 return 0;
2797
2798 ex_abort:
2799 res_abort_move(dev, slave, RES_MPT, id);
2800
2801 return err;
2802 }
2803
2804 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2805 struct mlx4_vhcr *vhcr,
2806 struct mlx4_cmd_mailbox *inbox,
2807 struct mlx4_cmd_mailbox *outbox,
2808 struct mlx4_cmd_info *cmd)
2809 {
2810 int err;
2811 int index = vhcr->in_modifier;
2812 struct res_mpt *mpt;
2813 int id;
2814
2815 id = index & mpt_mask(dev);
2816 err = get_res(dev, slave, id, RES_MPT, &mpt);
2817 if (err)
2818 return err;
2819
2820 if (mpt->com.from_state == RES_MPT_MAPPED) {
2821 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2822 * that, the VF must read the MPT. But since the MPT entry memory is not
2823 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2824 * entry contents. To guarantee that the MPT cannot be changed, the driver
2825 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2826 * ownership following the change. The change here allows the VF to
2827 * perform QUERY_MPT also when the entry is in SW ownership.
2828 */
2829 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2830 &mlx4_priv(dev)->mr_table.dmpt_table,
2831 mpt->key, NULL);
2832
2833 if (NULL == mpt_entry || NULL == outbox->buf) {
2834 err = -EINVAL;
2835 goto out;
2836 }
2837
2838 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2839
2840 err = 0;
2841 } else if (mpt->com.from_state == RES_MPT_HW) {
2842 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2843 } else {
2844 err = -EBUSY;
2845 goto out;
2846 }
2847
2848
2849 out:
2850 put_res(dev, slave, id, RES_MPT);
2851 return err;
2852 }
2853
2854 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2855 {
2856 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2857 }
2858
2859 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2860 {
2861 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2862 }
2863
2864 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2865 {
2866 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2867 }
2868
2869 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2870 struct mlx4_qp_context *context)
2871 {
2872 u32 qpn = vhcr->in_modifier & 0xffffff;
2873 u32 qkey = 0;
2874
2875 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2876 return;
2877
2878 /* adjust qkey in qp context */
2879 context->qkey = cpu_to_be32(qkey);
2880 }
2881
2882 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2883 struct mlx4_qp_context *qpc,
2884 struct mlx4_cmd_mailbox *inbox);
2885
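/*
 * RST2INIT is where a slave QP becomes visible to the tracker as RES_QP_HW.
 * References are taken on every resource the QPC points at (MTT range, send
 * and receive CQs, and the SRQ if used) so none of them can be freed while
 * the QP exists.
 */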
2886 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2887 struct mlx4_vhcr *vhcr,
2888 struct mlx4_cmd_mailbox *inbox,
2889 struct mlx4_cmd_mailbox *outbox,
2890 struct mlx4_cmd_info *cmd)
2891 {
2892 int err;
2893 int qpn = vhcr->in_modifier & 0x7fffff;
2894 struct res_mtt *mtt;
2895 struct res_qp *qp;
2896 struct mlx4_qp_context *qpc = inbox->buf + 8;
2897 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2898 int mtt_size = qp_get_mtt_size(qpc);
2899 struct res_cq *rcq;
2900 struct res_cq *scq;
2901 int rcqn = qp_get_rcqn(qpc);
2902 int scqn = qp_get_scqn(qpc);
2903 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2904 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2905 struct res_srq *srq;
2906 int local_qpn = vhcr->in_modifier & 0xffffff;
2907
2908 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2909 if (err)
2910 return err;
2911
2912 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2913 if (err)
2914 return err;
2915 qp->local_qpn = local_qpn;
2916 qp->sched_queue = 0;
2917 qp->param3 = 0;
2918 qp->vlan_control = 0;
2919 qp->fvl_rx = 0;
2920 qp->pri_path_fl = 0;
2921 qp->vlan_index = 0;
2922 qp->feup = 0;
2923 qp->qpc_flags = be32_to_cpu(qpc->flags);
2924
2925 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2926 if (err)
2927 goto ex_abort;
2928
2929 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2930 if (err)
2931 goto ex_put_mtt;
2932
2933 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2934 if (err)
2935 goto ex_put_mtt;
2936
2937 if (scqn != rcqn) {
2938 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2939 if (err)
2940 goto ex_put_rcq;
2941 } else
2942 scq = rcq;
2943
2944 if (use_srq) {
2945 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2946 if (err)
2947 goto ex_put_scq;
2948 }
2949
2950 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2951 update_pkey_index(dev, slave, inbox);
2952 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2953 if (err)
2954 goto ex_put_srq;
2955 atomic_inc(&mtt->ref_count);
2956 qp->mtt = mtt;
2957 atomic_inc(&rcq->ref_count);
2958 qp->rcq = rcq;
2959 atomic_inc(&scq->ref_count);
2960 qp->scq = scq;
2961
2962 if (scqn != rcqn)
2963 put_res(dev, slave, scqn, RES_CQ);
2964
2965 if (use_srq) {
2966 atomic_inc(&srq->ref_count);
2967 put_res(dev, slave, srqn, RES_SRQ);
2968 qp->srq = srq;
2969 }
2970
2971 /* Save param3 for dynamic changes from VST back to VGT */
2972 qp->param3 = qpc->param3;
2973 put_res(dev, slave, rcqn, RES_CQ);
2974 put_res(dev, slave, mtt_base, RES_MTT);
2975 res_end_move(dev, slave, RES_QP, qpn);
2976
2977 return 0;
2978
2979 ex_put_srq:
2980 if (use_srq)
2981 put_res(dev, slave, srqn, RES_SRQ);
2982 ex_put_scq:
2983 if (scqn != rcqn)
2984 put_res(dev, slave, scqn, RES_CQ);
2985 ex_put_rcq:
2986 put_res(dev, slave, rcqn, RES_CQ);
2987 ex_put_mtt:
2988 put_res(dev, slave, mtt_base, RES_MTT);
2989 ex_abort:
2990 res_abort_move(dev, slave, RES_QP, qpn);
2991
2992 return err;
2993 }
2994
2995 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2996 {
2997 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2998 }
2999
3000 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3001 {
3002 int log_eq_size = eqc->log_eq_size & 0x1f;
3003 int page_shift = (eqc->log_page_size & 0x3f) + 12;
3004
3005 if (log_eq_size + 5 < page_shift)
3006 return 1;
3007
3008 return 1 << (log_eq_size + 5 - page_shift);
3009 }
3010
3011 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3012 {
3013 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3014 }
3015
3016 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3017 {
3018 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3019 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3020
3021 if (log_cq_size + 5 < page_shift)
3022 return 1;
3023
3024 return 1 << (log_cq_size + 5 - page_shift);
3025 }
3026
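/*
 * EQ numbers are only unique within a function, so EQs are tracked under
 * res_id = (slave << 10) | eqn.
 */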
3027 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3028 struct mlx4_vhcr *vhcr,
3029 struct mlx4_cmd_mailbox *inbox,
3030 struct mlx4_cmd_mailbox *outbox,
3031 struct mlx4_cmd_info *cmd)
3032 {
3033 int err;
3034 int eqn = vhcr->in_modifier;
3035 int res_id = (slave << 10) | eqn;
3036 struct mlx4_eq_context *eqc = inbox->buf;
3037 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3038 int mtt_size = eq_get_mtt_size(eqc);
3039 struct res_eq *eq;
3040 struct res_mtt *mtt;
3041
3042 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3043 if (err)
3044 return err;
3045 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3046 if (err)
3047 goto out_add;
3048
3049 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3050 if (err)
3051 goto out_move;
3052
3053 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3054 if (err)
3055 goto out_put;
3056
3057 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3058 if (err)
3059 goto out_put;
3060
3061 atomic_inc(&mtt->ref_count);
3062 eq->mtt = mtt;
3063 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3064 res_end_move(dev, slave, RES_EQ, res_id);
3065 return 0;
3066
3067 out_put:
3068 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3069 out_move:
3070 res_abort_move(dev, slave, RES_EQ, res_id);
3071 out_add:
3072 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3073 return err;
3074 }
3075
3076 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3077 struct mlx4_vhcr *vhcr,
3078 struct mlx4_cmd_mailbox *inbox,
3079 struct mlx4_cmd_mailbox *outbox,
3080 struct mlx4_cmd_info *cmd)
3081 {
3082 int err;
3083 u8 get = vhcr->op_modifier;
3084
3085 if (get != 1)
3086 return -EPERM;
3087
3088 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3089
3090 return err;
3091 }
3092
3093 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3094 int len, struct res_mtt **res)
3095 {
3096 struct mlx4_priv *priv = mlx4_priv(dev);
3097 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3098 struct res_mtt *mtt;
3099 int err = -EINVAL;
3100
3101 spin_lock_irq(mlx4_tlock(dev));
3102 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3103 com.list) {
3104 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3105 *res = mtt;
3106 mtt->com.from_state = mtt->com.state;
3107 mtt->com.state = RES_MTT_BUSY;
3108 err = 0;
3109 break;
3110 }
3111 }
3112 spin_unlock_irq(mlx4_tlock(dev));
3113
3114 return err;
3115 }
3116
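/*
 * Sanity-check a slave-supplied QP context on state transitions: clear the
 * FPP bit, reject VF attempts to set a QP rate limit, bound mgid_index by
 * the slave's GID quota on Ethernet (RoCE) ports, and refuse MLX (special)
 * proxy QPs from slaves without SMI privileges.
 */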
3117 static int verify_qp_parameters(struct mlx4_dev *dev,
3118 struct mlx4_vhcr *vhcr,
3119 struct mlx4_cmd_mailbox *inbox,
3120 enum qp_transition transition, u8 slave)
3121 {
3122 u32 qp_type;
3123 u32 qpn;
3124 struct mlx4_qp_context *qp_ctx;
3125 enum mlx4_qp_optpar optpar;
3126 int port;
3127 int num_gids;
3128
3129 qp_ctx = inbox->buf + 8;
3130 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3131 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3132
3133 if (slave != mlx4_master_func_num(dev)) {
3134 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3135 /* setting QP rate-limit is disallowed for VFs */
3136 if (qp_ctx->rate_limit_params)
3137 return -EPERM;
3138 }
3139
3140 switch (qp_type) {
3141 case MLX4_QP_ST_RC:
3142 case MLX4_QP_ST_XRC:
3143 case MLX4_QP_ST_UC:
3144 switch (transition) {
3145 case QP_TRANS_INIT2RTR:
3146 case QP_TRANS_RTR2RTS:
3147 case QP_TRANS_RTS2RTS:
3148 case QP_TRANS_SQD2SQD:
3149 case QP_TRANS_SQD2RTS:
3150 if (slave != mlx4_master_func_num(dev)) {
3151 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3152 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3153 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3154 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3155 else
3156 num_gids = 1;
3157 if (qp_ctx->pri_path.mgid_index >= num_gids)
3158 return -EINVAL;
3159 }
3160 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3161 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3162 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3163 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3164 else
3165 num_gids = 1;
3166 if (qp_ctx->alt_path.mgid_index >= num_gids)
3167 return -EINVAL;
3168 }
3169 }
3170 break;
3171 default:
3172 break;
3173 }
3174 break;
3175
3176 case MLX4_QP_ST_MLX:
3177 qpn = vhcr->in_modifier & 0x7fffff;
3178 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3179 if (transition == QP_TRANS_INIT2RTR &&
3180 slave != mlx4_master_func_num(dev) &&
3181 mlx4_is_qp_reserved(dev, qpn) &&
3182 !mlx4_vf_smi_enabled(dev, slave, port)) {
3183 /* only enabled VFs may create MLX proxy QPs */
3184 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3185 __func__, slave, port);
3186 return -EPERM;
3187 }
3188 break;
3189
3190 default:
3191 break;
3192 }
3193
3194 return 0;
3195 }
3196
3197 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3198 struct mlx4_vhcr *vhcr,
3199 struct mlx4_cmd_mailbox *inbox,
3200 struct mlx4_cmd_mailbox *outbox,
3201 struct mlx4_cmd_info *cmd)
3202 {
3203 struct mlx4_mtt mtt;
3204 __be64 *page_list = inbox->buf;
3205 u64 *pg_list = (u64 *)page_list;
3206 int i;
3207 struct res_mtt *rmtt = NULL;
3208 int start = be64_to_cpu(page_list[0]);
3209 int npages = vhcr->in_modifier;
3210 int err;
3211
3212 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3213 if (err)
3214 return err;
3215
3216 /* Call the SW implementation of write_mtt:
3217 * - Prepare a dummy mtt struct
3218 * - Translate inbox contents to simple addresses in host endianness */
3219 mtt.offset = 0; /* TBD this is broken but I don't handle it since
3220 we don't really use it */
3221 mtt.order = 0;
3222 mtt.page_shift = 0;
3223 for (i = 0; i < npages; ++i)
3224 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3225
3226 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3227 ((u64 *)page_list + 2));
3228
3229 if (rmtt)
3230 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3231
3232 return err;
3233 }
3234
3235 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3236 struct mlx4_vhcr *vhcr,
3237 struct mlx4_cmd_mailbox *inbox,
3238 struct mlx4_cmd_mailbox *outbox,
3239 struct mlx4_cmd_info *cmd)
3240 {
3241 int eqn = vhcr->in_modifier;
3242 int res_id = eqn | (slave << 10);
3243 struct res_eq *eq;
3244 int err;
3245
3246 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3247 if (err)
3248 return err;
3249
3250 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3251 if (err)
3252 goto ex_abort;
3253
3254 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3255 if (err)
3256 goto ex_put;
3257
3258 atomic_dec(&eq->mtt->ref_count);
3259 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3260 res_end_move(dev, slave, RES_EQ, res_id);
3261 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3262
3263 return 0;
3264
3265 ex_put:
3266 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3267 ex_abort:
3268 res_abort_move(dev, slave, RES_EQ, res_id);
3269
3270 return err;
3271 }
3272
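/*
 * Inject an event into a slave's event EQ via the GEN_EQE command. Only
 * slaves that registered an EQ for this event type receive it. The
 * in_modifier encodes the slave in the low byte and the EQN in bits 16..25;
 * the first 28 bytes of the EQE are copied into the mailbox (the remaining
 * bytes of the 32-byte EQE, including the ownership bit, are presumably
 * filled in by the device).
 */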
3273 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3274 {
3275 struct mlx4_priv *priv = mlx4_priv(dev);
3276 struct mlx4_slave_event_eq_info *event_eq;
3277 struct mlx4_cmd_mailbox *mailbox;
3278 u32 in_modifier = 0;
3279 int err;
3280 int res_id;
3281 struct res_eq *req;
3282
3283 if (!priv->mfunc.master.slave_state)
3284 return -EINVAL;
3285
3286 /* check for slave valid, slave not PF, and slave active */
3287 if (slave < 0 || slave > dev->persist->num_vfs ||
3288 slave == dev->caps.function ||
3289 !priv->mfunc.master.slave_state[slave].active)
3290 return 0;
3291
3292 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3293
3294 /* Create the event only if the slave is registered */
3295 if (event_eq->eqn < 0)
3296 return 0;
3297
3298 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3299 res_id = (slave << 10) | event_eq->eqn;
3300 err = get_res(dev, slave, res_id, RES_EQ, &req);
3301 if (err)
3302 goto unlock;
3303
3304 if (req->com.from_state != RES_EQ_HW) {
3305 err = -EINVAL;
3306 goto put;
3307 }
3308
3309 mailbox = mlx4_alloc_cmd_mailbox(dev);
3310 if (IS_ERR(mailbox)) {
3311 err = PTR_ERR(mailbox);
3312 goto put;
3313 }
3314
3315 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3316 ++event_eq->token;
3317 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3318 }
3319
3320 memcpy(mailbox->buf, (u8 *) eqe, 28);
3321
3322 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3323
3324 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3325 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3326 MLX4_CMD_NATIVE);
3327
3328 put_res(dev, slave, res_id, RES_EQ);
3329 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3330 mlx4_free_cmd_mailbox(dev, mailbox);
3331 return err;
3332
3333 put:
3334 put_res(dev, slave, res_id, RES_EQ);
3335
3336 unlock:
3337 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3338 return err;
3339 }
3340
3341 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3342 struct mlx4_vhcr *vhcr,
3343 struct mlx4_cmd_mailbox *inbox,
3344 struct mlx4_cmd_mailbox *outbox,
3345 struct mlx4_cmd_info *cmd)
3346 {
3347 int eqn = vhcr->in_modifier;
3348 int res_id = eqn | (slave << 10);
3349 struct res_eq *eq;
3350 int err;
3351
3352 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3353 if (err)
3354 return err;
3355
3356 if (eq->com.from_state != RES_EQ_HW) {
3357 err = -EINVAL;
3358 goto ex_put;
3359 }
3360
3361 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3362
3363 ex_put:
3364 put_res(dev, slave, res_id, RES_EQ);
3365 return err;
3366 }
3367
3368 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3369 struct mlx4_vhcr *vhcr,
3370 struct mlx4_cmd_mailbox *inbox,
3371 struct mlx4_cmd_mailbox *outbox,
3372 struct mlx4_cmd_info *cmd)
3373 {
3374 int err;
3375 int cqn = vhcr->in_modifier;
3376 struct mlx4_cq_context *cqc = inbox->buf;
3377 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3378 struct res_cq *cq = NULL;
3379 struct res_mtt *mtt;
3380
3381 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3382 if (err)
3383 return err;
3384 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3385 if (err)
3386 goto out_move;
3387 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3388 if (err)
3389 goto out_put;
3390 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3391 if (err)
3392 goto out_put;
3393 atomic_inc(&mtt->ref_count);
3394 cq->mtt = mtt;
3395 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3396 res_end_move(dev, slave, RES_CQ, cqn);
3397 return 0;
3398
3399 out_put:
3400 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3401 out_move:
3402 res_abort_move(dev, slave, RES_CQ, cqn);
3403 return err;
3404 }
3405
3406 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3407 struct mlx4_vhcr *vhcr,
3408 struct mlx4_cmd_mailbox *inbox,
3409 struct mlx4_cmd_mailbox *outbox,
3410 struct mlx4_cmd_info *cmd)
3411 {
3412 int err;
3413 int cqn = vhcr->in_modifier;
3414 struct res_cq *cq = NULL;
3415
3416 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3417 if (err)
3418 return err;
3419 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3420 if (err)
3421 goto out_move;
3422 atomic_dec(&cq->mtt->ref_count);
3423 res_end_move(dev, slave, RES_CQ, cqn);
3424 return 0;
3425
3426 out_move:
3427 res_abort_move(dev, slave, RES_CQ, cqn);
3428 return err;
3429 }
3430
3431 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3432 struct mlx4_vhcr *vhcr,
3433 struct mlx4_cmd_mailbox *inbox,
3434 struct mlx4_cmd_mailbox *outbox,
3435 struct mlx4_cmd_info *cmd)
3436 {
3437 int cqn = vhcr->in_modifier;
3438 struct res_cq *cq;
3439 int err;
3440
3441 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3442 if (err)
3443 return err;
3444
3445 if (cq->com.from_state != RES_CQ_HW)
3446 goto ex_put;
3447
3448 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3449 ex_put:
3450 put_res(dev, slave, cqn, RES_CQ);
3451
3452 return err;
3453 }
3454
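/*
 * MODIFY_CQ with op_modifier 0 resizes the CQ: validate the new MTT range,
 * run the command, then move the CQ's MTT reference from the old range to
 * the new one.
 */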
3455 static int handle_resize(struct mlx4_dev *dev, int slave,
3456 struct mlx4_vhcr *vhcr,
3457 struct mlx4_cmd_mailbox *inbox,
3458 struct mlx4_cmd_mailbox *outbox,
3459 struct mlx4_cmd_info *cmd,
3460 struct res_cq *cq)
3461 {
3462 int err;
3463 struct res_mtt *orig_mtt;
3464 struct res_mtt *mtt;
3465 struct mlx4_cq_context *cqc = inbox->buf;
3466 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3467
3468 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3469 if (err)
3470 return err;
3471
3472 if (orig_mtt != cq->mtt) {
3473 err = -EINVAL;
3474 goto ex_put;
3475 }
3476
3477 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3478 if (err)
3479 goto ex_put;
3480
3481 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3482 if (err)
3483 goto ex_put1;
3484 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3485 if (err)
3486 goto ex_put1;
3487 atomic_dec(&orig_mtt->ref_count);
3488 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3489 atomic_inc(&mtt->ref_count);
3490 cq->mtt = mtt;
3491 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3492 return 0;
3493
3494 ex_put1:
3495 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3496 ex_put:
3497 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3498
3499 return err;
3500
3501 }
3502
3503 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3504 struct mlx4_vhcr *vhcr,
3505 struct mlx4_cmd_mailbox *inbox,
3506 struct mlx4_cmd_mailbox *outbox,
3507 struct mlx4_cmd_info *cmd)
3508 {
3509 int cqn = vhcr->in_modifier;
3510 struct res_cq *cq;
3511 int err;
3512
3513 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3514 if (err)
3515 return err;
3516
3517 if (cq->com.from_state != RES_CQ_HW)
3518 goto ex_put;
3519
3520 if (vhcr->op_modifier == 0) {
3521 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3522 goto ex_put;
3523 }
3524
3525 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3526 ex_put:
3527 put_res(dev, slave, cqn, RES_CQ);
3528
3529 return err;
3530 }
3531
3532 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3533 {
3534 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3535 int log_rq_stride = srqc->logstride & 7;
3536 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3537
3538 if (log_srq_size + log_rq_stride + 4 < page_shift)
3539 return 1;
3540
3541 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3542 }
3543
3544 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3545 struct mlx4_vhcr *vhcr,
3546 struct mlx4_cmd_mailbox *inbox,
3547 struct mlx4_cmd_mailbox *outbox,
3548 struct mlx4_cmd_info *cmd)
3549 {
3550 int err;
3551 int srqn = vhcr->in_modifier;
3552 struct res_mtt *mtt;
3553 struct res_srq *srq = NULL;
3554 struct mlx4_srq_context *srqc = inbox->buf;
3555 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3556
3557 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3558 return -EINVAL;
3559
3560 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3561 if (err)
3562 return err;
3563 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3564 if (err)
3565 goto ex_abort;
3566 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3567 mtt);
3568 if (err)
3569 goto ex_put_mtt;
3570
3571 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3572 if (err)
3573 goto ex_put_mtt;
3574
3575 atomic_inc(&mtt->ref_count);
3576 srq->mtt = mtt;
3577 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3578 res_end_move(dev, slave, RES_SRQ, srqn);
3579 return 0;
3580
3581 ex_put_mtt:
3582 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3583 ex_abort:
3584 res_abort_move(dev, slave, RES_SRQ, srqn);
3585
3586 return err;
3587 }
3588
3589 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3590 struct mlx4_vhcr *vhcr,
3591 struct mlx4_cmd_mailbox *inbox,
3592 struct mlx4_cmd_mailbox *outbox,
3593 struct mlx4_cmd_info *cmd)
3594 {
3595 int err;
3596 int srqn = vhcr->in_modifier;
3597 struct res_srq *srq = NULL;
3598
3599 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3600 if (err)
3601 return err;
3602 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3603 if (err)
3604 goto ex_abort;
3605 atomic_dec(&srq->mtt->ref_count);
3606 if (srq->cq)
3607 atomic_dec(&srq->cq->ref_count);
3608 res_end_move(dev, slave, RES_SRQ, srqn);
3609
3610 return 0;
3611
3612 ex_abort:
3613 res_abort_move(dev, slave, RES_SRQ, srqn);
3614
3615 return err;
3616 }
3617
3618 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3619 struct mlx4_vhcr *vhcr,
3620 struct mlx4_cmd_mailbox *inbox,
3621 struct mlx4_cmd_mailbox *outbox,
3622 struct mlx4_cmd_info *cmd)
3623 {
3624 int err;
3625 int srqn = vhcr->in_modifier;
3626 struct res_srq *srq;
3627
3628 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3629 if (err)
3630 return err;
3631 if (srq->com.from_state != RES_SRQ_HW) {
3632 err = -EBUSY;
3633 goto out;
3634 }
3635 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3636 out:
3637 put_res(dev, slave, srqn, RES_SRQ);
3638 return err;
3639 }
3640
3641 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3642 struct mlx4_vhcr *vhcr,
3643 struct mlx4_cmd_mailbox *inbox,
3644 struct mlx4_cmd_mailbox *outbox,
3645 struct mlx4_cmd_info *cmd)
3646 {
3647 int err;
3648 int srqn = vhcr->in_modifier;
3649 struct res_srq *srq;
3650
3651 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3652 if (err)
3653 return err;
3654
3655 if (srq->com.from_state != RES_SRQ_HW) {
3656 err = -EBUSY;
3657 goto out;
3658 }
3659
3660 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3661 out:
3662 put_res(dev, slave, srqn, RES_SRQ);
3663 return err;
3664 }
3665
3666 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3667 struct mlx4_vhcr *vhcr,
3668 struct mlx4_cmd_mailbox *inbox,
3669 struct mlx4_cmd_mailbox *outbox,
3670 struct mlx4_cmd_info *cmd)
3671 {
3672 int err;
3673 int qpn = vhcr->in_modifier & 0x7fffff;
3674 struct res_qp *qp;
3675
3676 err = get_res(dev, slave, qpn, RES_QP, &qp);
3677 if (err)
3678 return err;
3679 if (qp->com.from_state != RES_QP_HW) {
3680 err = -EBUSY;
3681 goto out;
3682 }
3683
3684 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3685 out:
3686 put_res(dev, slave, qpn, RES_QP);
3687 return err;
3688 }
3689
3690 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3691 struct mlx4_vhcr *vhcr,
3692 struct mlx4_cmd_mailbox *inbox,
3693 struct mlx4_cmd_mailbox *outbox,
3694 struct mlx4_cmd_info *cmd)
3695 {
3696 struct mlx4_qp_context *context = inbox->buf + 8;
3697 adjust_proxy_tun_qkey(dev, vhcr, context);
3698 update_pkey_index(dev, slave, inbox);
3699 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3700 }
3701
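/*
 * Bit 6 of sched_queue selects the physical port. Rewrite it in both the
 * primary and alternate paths so the slave's virtual port number maps to the
 * real port it has been given.
 */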
3702 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3703 struct mlx4_qp_context *qpc,
3704 struct mlx4_cmd_mailbox *inbox)
3705 {
3706 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3707 u8 pri_sched_queue;
3708 int port = mlx4_slave_convert_port(
3709 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3710
3711 if (port < 0)
3712 return -EINVAL;
3713
3714 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3715 ((port & 1) << 6);
3716
3717 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3718 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3719 qpc->pri_path.sched_queue = pri_sched_queue;
3720 }
3721
3722 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3723 port = mlx4_slave_convert_port(
3724 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3725 + 1) - 1;
3726 if (port < 0)
3727 return -EINVAL;
3728 qpc->alt_path.sched_queue =
3729 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3730 (port & 1) << 6;
3731 }
3732 return 0;
3733 }
3734
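/*
 * For Ethernet (RoCE) QPs, make sure the SMAC index in the QPC refers to a
 * MAC this slave actually registered.
 */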
3735 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3736 struct mlx4_qp_context *qpc,
3737 struct mlx4_cmd_mailbox *inbox)
3738 {
3739 u64 mac;
3740 int port;
3741 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3742 u8 sched = *(u8 *)(inbox->buf + 64);
3743 u8 smac_ix;
3744
3745 port = (sched >> 6 & 1) + 1;
3746 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3747 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3748 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3749 return -ENOENT;
3750 }
3751 return 0;
3752 }
3753
3754 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3755 struct mlx4_vhcr *vhcr,
3756 struct mlx4_cmd_mailbox *inbox,
3757 struct mlx4_cmd_mailbox *outbox,
3758 struct mlx4_cmd_info *cmd)
3759 {
3760 int err;
3761 struct mlx4_qp_context *qpc = inbox->buf + 8;
3762 int qpn = vhcr->in_modifier & 0x7fffff;
3763 struct res_qp *qp;
3764 u8 orig_sched_queue;
3765 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3766 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3767 u8 orig_pri_path_fl = qpc->pri_path.fl;
3768 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3769 u8 orig_feup = qpc->pri_path.feup;
3770
3771 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3772 if (err)
3773 return err;
3774 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3775 if (err)
3776 return err;
3777
3778 if (roce_verify_mac(dev, slave, qpc, inbox))
3779 return -EINVAL;
3780
3781 update_pkey_index(dev, slave, inbox);
3782 update_gid(dev, inbox, (u8)slave);
3783 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3784 orig_sched_queue = qpc->pri_path.sched_queue;
3785
3786 err = get_res(dev, slave, qpn, RES_QP, &qp);
3787 if (err)
3788 return err;
3789 if (qp->com.from_state != RES_QP_HW) {
3790 err = -EBUSY;
3791 goto out;
3792 }
3793
3794 err = update_vport_qp_param(dev, inbox, slave, qpn);
3795 if (err)
3796 goto out;
3797
3798 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3799 out:
3800 /* if no error, save sched queue value passed in by VF. This is
3801 * essentially the QOS value provided by the VF. This will be useful
3802 * if we allow dynamic changes from VST back to VGT
3803 */
3804 if (!err) {
3805 qp->sched_queue = orig_sched_queue;
3806 qp->vlan_control = orig_vlan_control;
3807 qp->fvl_rx = orig_fvl_rx;
3808 qp->pri_path_fl = orig_pri_path_fl;
3809 qp->vlan_index = orig_vlan_index;
3810 qp->feup = orig_feup;
3811 }
3812 put_res(dev, slave, qpn, RES_QP);
3813 return err;
3814 }
3815
3816 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3817 struct mlx4_vhcr *vhcr,
3818 struct mlx4_cmd_mailbox *inbox,
3819 struct mlx4_cmd_mailbox *outbox,
3820 struct mlx4_cmd_info *cmd)
3821 {
3822 int err;
3823 struct mlx4_qp_context *context = inbox->buf + 8;
3824
3825 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3826 if (err)
3827 return err;
3828 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3829 if (err)
3830 return err;
3831
3832 update_pkey_index(dev, slave, inbox);
3833 update_gid(dev, inbox, (u8)slave);
3834 adjust_proxy_tun_qkey(dev, vhcr, context);
3835 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3836 }
3837
3838 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3839 struct mlx4_vhcr *vhcr,
3840 struct mlx4_cmd_mailbox *inbox,
3841 struct mlx4_cmd_mailbox *outbox,
3842 struct mlx4_cmd_info *cmd)
3843 {
3844 int err;
3845 struct mlx4_qp_context *context = inbox->buf + 8;
3846
3847 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3848 if (err)
3849 return err;
3850 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3851 if (err)
3852 return err;
3853
3854 update_pkey_index(dev, slave, inbox);
3855 update_gid(dev, inbox, (u8)slave);
3856 adjust_proxy_tun_qkey(dev, vhcr, context);
3857 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3858 }
3859
3860
3861 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3862 struct mlx4_vhcr *vhcr,
3863 struct mlx4_cmd_mailbox *inbox,
3864 struct mlx4_cmd_mailbox *outbox,
3865 struct mlx4_cmd_info *cmd)
3866 {
3867 struct mlx4_qp_context *context = inbox->buf + 8;
3868 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3869 if (err)
3870 return err;
3871 adjust_proxy_tun_qkey(dev, vhcr, context);
3872 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3873 }
3874
3875 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3876 struct mlx4_vhcr *vhcr,
3877 struct mlx4_cmd_mailbox *inbox,
3878 struct mlx4_cmd_mailbox *outbox,
3879 struct mlx4_cmd_info *cmd)
3880 {
3881 int err;
3882 struct mlx4_qp_context *context = inbox->buf + 8;
3883
3884 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3885 if (err)
3886 return err;
3887 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3888 if (err)
3889 return err;
3890
3891 adjust_proxy_tun_qkey(dev, vhcr, context);
3892 update_gid(dev, inbox, (u8)slave);
3893 update_pkey_index(dev, slave, inbox);
3894 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3895 }
3896
3897 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3898 struct mlx4_vhcr *vhcr,
3899 struct mlx4_cmd_mailbox *inbox,
3900 struct mlx4_cmd_mailbox *outbox,
3901 struct mlx4_cmd_info *cmd)
3902 {
3903 int err;
3904 struct mlx4_qp_context *context = inbox->buf + 8;
3905
3906 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3907 if (err)
3908 return err;
3909 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3910 if (err)
3911 return err;
3912
3913 adjust_proxy_tun_qkey(dev, vhcr, context);
3914 update_gid(dev, inbox, (u8)slave);
3915 update_pkey_index(dev, slave, inbox);
3916 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3917 }
3918
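/*
 * Moving a QP to RESET returns it to the MAPPED state in the resource
 * tracker and drops the references it held on its MTT, CQs and SRQ.
 */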
3919 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3920 struct mlx4_vhcr *vhcr,
3921 struct mlx4_cmd_mailbox *inbox,
3922 struct mlx4_cmd_mailbox *outbox,
3923 struct mlx4_cmd_info *cmd)
3924 {
3925 int err;
3926 int qpn = vhcr->in_modifier & 0x7fffff;
3927 struct res_qp *qp;
3928
3929 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3930 if (err)
3931 return err;
3932 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3933 if (err)
3934 goto ex_abort;
3935
3936 atomic_dec(&qp->mtt->ref_count);
3937 atomic_dec(&qp->rcq->ref_count);
3938 atomic_dec(&qp->scq->ref_count);
3939 if (qp->srq)
3940 atomic_dec(&qp->srq->ref_count);
3941 res_end_move(dev, slave, RES_QP, qpn);
3942 return 0;
3943
3944 ex_abort:
3945 res_abort_move(dev, slave, RES_QP, qpn);
3946
3947 return err;
3948 }
3949
3950 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3951 struct res_qp *rqp, u8 *gid)
3952 {
3953 struct res_gid *res;
3954
3955 list_for_each_entry(res, &rqp->mcg_list, list) {
3956 if (!memcmp(res->gid, gid, 16))
3957 return res;
3958 }
3959 return NULL;
3960 }
3961
3962 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3963 u8 *gid, enum mlx4_protocol prot,
3964 enum mlx4_steer_type steer, u64 reg_id)
3965 {
3966 struct res_gid *res;
3967 int err;
3968
3969 res = kzalloc(sizeof *res, GFP_KERNEL);
3970 if (!res)
3971 return -ENOMEM;
3972
3973 spin_lock_irq(&rqp->mcg_spl);
3974 if (find_gid(dev, slave, rqp, gid)) {
3975 kfree(res);
3976 err = -EEXIST;
3977 } else {
3978 memcpy(res->gid, gid, 16);
3979 res->prot = prot;
3980 res->steer = steer;
3981 res->reg_id = reg_id;
3982 list_add_tail(&res->list, &rqp->mcg_list);
3983 err = 0;
3984 }
3985 spin_unlock_irq(&rqp->mcg_spl);
3986
3987 return err;
3988 }
3989
3990 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3991 u8 *gid, enum mlx4_protocol prot,
3992 enum mlx4_steer_type steer, u64 *reg_id)
3993 {
3994 struct res_gid *res;
3995 int err;
3996
3997 spin_lock_irq(&rqp->mcg_spl);
3998 res = find_gid(dev, slave, rqp, gid);
3999 if (!res || res->prot != prot || res->steer != steer)
4000 err = -EINVAL;
4001 else {
4002 *reg_id = res->reg_id;
4003 list_del(&res->list);
4004 kfree(res);
4005 err = 0;
4006 }
4007 spin_unlock_irq(&rqp->mcg_spl);
4008
4009 return err;
4010 }
4011
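/*
 * Attach a QP to a steering entry, dispatching on the device steering
 * mode: device-managed flow steering goes through
 * mlx4_trans_to_dmfs_attach(), B0 steering through the common attach
 * path. gid[5] carries the port and is converted to the physical port.
 */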
4012 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4013 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4014 enum mlx4_steer_type type, u64 *reg_id)
4015 {
4016 switch (dev->caps.steering_mode) {
4017 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4018 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4019 if (port < 0)
4020 return port;
4021 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4022 block_loopback, prot,
4023 reg_id);
4024 }
4025 case MLX4_STEERING_MODE_B0:
4026 if (prot == MLX4_PROT_ETH) {
4027 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4028 if (port < 0)
4029 return port;
4030 gid[5] = port;
4031 }
4032 return mlx4_qp_attach_common(dev, qp, gid,
4033 block_loopback, prot, type);
4034 default:
4035 return -EINVAL;
4036 }
4037 }
4038
4039 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4040 u8 gid[16], enum mlx4_protocol prot,
4041 enum mlx4_steer_type type, u64 reg_id)
4042 {
4043 switch (dev->caps.steering_mode) {
4044 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4045 return mlx4_flow_detach(dev, reg_id);
4046 case MLX4_STEERING_MODE_B0:
4047 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4048 default:
4049 return -EINVAL;
4050 }
4051 }
4052
4053 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4054 u8 *gid, enum mlx4_protocol prot)
4055 {
4056 int real_port;
4057
4058 if (prot != MLX4_PROT_ETH)
4059 return 0;
4060
4061 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4062 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4063 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4064 if (real_port < 0)
4065 return -EINVAL;
4066 gid[5] = real_port;
4067 }
4068
4069 return 0;
4070 }
4071
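/*
 * Handler for QP multicast attach/detach requests from slaves.
 * op_modifier selects attach vs. detach; attached GIDs are recorded
 * per QP in the resource tracker so they can be cleaned up when the
 * slave goes away.
 */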
4072 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4073 struct mlx4_vhcr *vhcr,
4074 struct mlx4_cmd_mailbox *inbox,
4075 struct mlx4_cmd_mailbox *outbox,
4076 struct mlx4_cmd_info *cmd)
4077 {
4078 struct mlx4_qp qp; /* dummy for calling attach/detach */
4079 u8 *gid = inbox->buf;
4080 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4081 int err;
4082 int qpn;
4083 struct res_qp *rqp;
4084 u64 reg_id = 0;
4085 int attach = vhcr->op_modifier;
4086 int block_loopback = vhcr->in_modifier >> 31;
4087 u8 steer_type_mask = 2;
4088 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4089
4090 qpn = vhcr->in_modifier & 0xffffff;
4091 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4092 if (err)
4093 return err;
4094
4095 qp.qpn = qpn;
4096 if (attach) {
4097 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4098 type, &reg_id);
4099 if (err) {
4100 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
4101 goto ex_put;
4102 }
4103 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4104 if (err)
4105 goto ex_detach;
4106 } else {
4107 err = mlx4_adjust_port(dev, slave, gid, prot);
4108 if (err)
4109 goto ex_put;
4110
4111 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4112 if (err)
4113 goto ex_put;
4114
4115 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4116 if (err)
4117 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4118 qpn, reg_id);
4119 }
4120 put_res(dev, slave, qpn, RES_QP);
4121 return err;
4122
4123 ex_detach:
4124 qp_detach(dev, &qp, gid, prot, type, reg_id);
4125 ex_put:
4126 put_res(dev, slave, qpn, RES_QP);
4127 return err;
4128 }
4129
4130 /*
4131 * MAC validation for Flow Steering rules.
4132 * VF can attach rules only with a mac address which is assigned to it.
4133 */
4134 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4135 struct list_head *rlist)
4136 {
4137 struct mac_res *res, *tmp;
4138 __be64 be_mac;
4139
4140 /* make sure it isn't multicast or broadcast mac */
4141 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4142 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4143 list_for_each_entry_safe(res, tmp, rlist, list) {
4144 be_mac = cpu_to_be64(res->mac << 16);
4145 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4146 return 0;
4147 }
4148 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4149 eth_header->eth.dst_mac, slave);
4150 return -EINVAL;
4151 }
4152 return 0;
4153 }
4154
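/*
 * If the rule consists only of an Ethernet header whose destination MAC
 * is multicast or broadcast, force its priority to the NIC domain.
 */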
4155 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4156 struct _rule_hw *eth_header)
4157 {
4158 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4159 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4160 struct mlx4_net_trans_rule_hw_eth *eth =
4161 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4162 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4163 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4164 next_rule->rsvd == 0;
4165
4166 if (last_rule)
4167 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4168 }
4169 }
4170
4171 /*
4172 * In case of missing eth header, append eth header with a MAC address
4173 * assigned to the VF.
4174 */
4175 static int add_eth_header(struct mlx4_dev *dev, int slave,
4176 struct mlx4_cmd_mailbox *inbox,
4177 struct list_head *rlist, int header_id)
4178 {
4179 struct mac_res *res, *tmp;
4180 u8 port;
4181 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4182 struct mlx4_net_trans_rule_hw_eth *eth_header;
4183 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4184 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4185 __be64 be_mac = 0;
4186 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4187
4188 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4189 port = ctrl->port;
4190 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4191
4192 /* Clear a space in the inbox for eth header */
4193 switch (header_id) {
4194 case MLX4_NET_TRANS_RULE_ID_IPV4:
4195 ip_header =
4196 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4197 memmove(ip_header, eth_header,
4198 sizeof(*ip_header) + sizeof(*l4_header));
4199 break;
4200 case MLX4_NET_TRANS_RULE_ID_TCP:
4201 case MLX4_NET_TRANS_RULE_ID_UDP:
4202 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4203 (eth_header + 1);
4204 memmove(l4_header, eth_header, sizeof(*l4_header));
4205 break;
4206 default:
4207 return -EINVAL;
4208 }
4209 list_for_each_entry_safe(res, tmp, rlist, list) {
4210 if (port == res->port) {
4211 be_mac = cpu_to_be64(res->mac << 16);
4212 break;
4213 }
4214 }
4215 if (!be_mac) {
4216 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4217 port);
4218 return -EINVAL;
4219 }
4220
4221 memset(eth_header, 0, sizeof(*eth_header));
4222 eth_header->size = sizeof(*eth_header) >> 2;
4223 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4224 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4225 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4226
4227 return 0;
4228
4229 }
4230
4231 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4232 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4233 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
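/*
 * UPDATE_QP from a slave is restricted to the primary-path fields listed
 * in MLX4_UPD_QP_PATH_MASK_SUPPORTED (MAC index and source-check
 * loopback); anything else in the masks is rejected with -EPERM.
 */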
4234 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4235 struct mlx4_vhcr *vhcr,
4236 struct mlx4_cmd_mailbox *inbox,
4237 struct mlx4_cmd_mailbox *outbox,
4238 struct mlx4_cmd_info *cmd_info)
4239 {
4240 int err;
4241 u32 qpn = vhcr->in_modifier & 0xffffff;
4242 struct res_qp *rqp;
4243 u64 mac;
4244 unsigned port;
4245 u64 pri_addr_path_mask;
4246 struct mlx4_update_qp_context *cmd;
4247 int smac_index;
4248
4249 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4250
4251 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4252 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4253 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4254 return -EPERM;
4255
4256 if ((pri_addr_path_mask &
4257 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4258 !(dev->caps.flags2 &
4259 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4260 mlx4_warn(dev,
4261 "Src check LB for slave %d isn't supported\n",
4262 slave);
4263 return -ENOTSUPP;
4264 }
4265
4266 /* Just change the smac for the QP */
4267 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4268 if (err) {
4269 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4270 return err;
4271 }
4272
4273 port = (rqp->sched_queue >> 6 & 1) + 1;
4274
4275 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4276 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4277 err = mac_find_smac_ix_in_slave(dev, slave, port,
4278 smac_index, &mac);
4279
4280 if (err) {
4281 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4282 qpn, smac_index);
4283 goto err_mac;
4284 }
4285 }
4286
4287 err = mlx4_cmd(dev, inbox->dma,
4288 vhcr->in_modifier, 0,
4289 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4290 MLX4_CMD_NATIVE);
4291 if (err) {
4292 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4293 goto err_mac;
4294 }
4295
4296 err_mac:
4297 put_res(dev, slave, qpn, RES_QP);
4298 return err;
4299 }
4300
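/*
 * Validate and forward a flow-steering attach request from a slave:
 * convert the port, make sure the target QP belongs to the slave, verify
 * (or add) an L2 header matching one of the slave's MACs, then record
 * the returned rule id in the resource tracker.
 */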
4301 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4302 struct mlx4_vhcr *vhcr,
4303 struct mlx4_cmd_mailbox *inbox,
4304 struct mlx4_cmd_mailbox *outbox,
4305 struct mlx4_cmd_info *cmd)
4306 {
4307
4308 struct mlx4_priv *priv = mlx4_priv(dev);
4309 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4310 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4311 int err;
4312 int qpn;
4313 struct res_qp *rqp;
4314 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4315 struct _rule_hw *rule_header;
4316 int header_id;
4317
4318 if (dev->caps.steering_mode !=
4319 MLX4_STEERING_MODE_DEVICE_MANAGED)
4320 return -EOPNOTSUPP;
4321
4322 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4323 err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4324 if (err <= 0)
4325 return -EINVAL;
4326 ctrl->port = err;
4327 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4328 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4329 if (err) {
4330 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4331 return err;
4332 }
4333 rule_header = (struct _rule_hw *)(ctrl + 1);
4334 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4335
4336 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4337 handle_eth_header_mcast_prio(ctrl, rule_header);
4338
4339 if (slave == dev->caps.function)
4340 goto execute;
4341
4342 switch (header_id) {
4343 case MLX4_NET_TRANS_RULE_ID_ETH:
4344 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4345 err = -EINVAL;
4346 goto err_put;
4347 }
4348 break;
4349 case MLX4_NET_TRANS_RULE_ID_IB:
4350 break;
4351 case MLX4_NET_TRANS_RULE_ID_IPV4:
4352 case MLX4_NET_TRANS_RULE_ID_TCP:
4353 case MLX4_NET_TRANS_RULE_ID_UDP:
4354 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4355 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4356 err = -EINVAL;
4357 goto err_put;
4358 }
4359 vhcr->in_modifier +=
4360 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4361 break;
4362 default:
4363 pr_err("Corrupted mailbox\n");
4364 err = -EINVAL;
4365 goto err_put;
4366 }
4367
4368 execute:
4369 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4370 vhcr->in_modifier, 0,
4371 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4372 MLX4_CMD_NATIVE);
4373 if (err)
4374 goto err_put;
4375
4376 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4377 if (err) {
4378 mlx4_err(dev, "Fail to add flow steering resources\n");
4379 /* detach rule*/
4380 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4381 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4382 MLX4_CMD_NATIVE);
4383 goto err_put;
4384 }
4385 atomic_inc(&rqp->ref_count);
4386 err_put:
4387 put_res(dev, slave, qpn, RES_QP);
4388 return err;
4389 }
4390
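/*
 * Detach a flow-steering rule previously attached by this slave: release
 * the rule's tracker entry, issue the DETACH firmware command, and drop
 * the reference held on the owning QP.
 */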
4391 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4392 struct mlx4_vhcr *vhcr,
4393 struct mlx4_cmd_mailbox *inbox,
4394 struct mlx4_cmd_mailbox *outbox,
4395 struct mlx4_cmd_info *cmd)
4396 {
4397 int err;
4398 struct res_qp *rqp;
4399 struct res_fs_rule *rrule;
4400
4401 if (dev->caps.steering_mode !=
4402 MLX4_STEERING_MODE_DEVICE_MANAGED)
4403 return -EOPNOTSUPP;
4404
4405 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4406 if (err)
4407 return err;
4408 /* Release the rule from busy state before removal */
4409 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4410 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4411 if (err)
4412 return err;
4413
4414 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4415 if (err) {
4416 mlx4_err(dev, "Fail to remove flow steering resources\n");
4417 goto out;
4418 }
4419
4420 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4421 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4422 MLX4_CMD_NATIVE);
4423 if (!err)
4424 atomic_dec(&rqp->ref_count);
4425 out:
4426 put_res(dev, slave, rrule->qpn, RES_QP);
4427 return err;
4428 }
4429
4430 enum {
4431 BUSY_MAX_RETRIES = 10
4432 };
4433
4434 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4435 struct mlx4_vhcr *vhcr,
4436 struct mlx4_cmd_mailbox *inbox,
4437 struct mlx4_cmd_mailbox *outbox,
4438 struct mlx4_cmd_info *cmd)
4439 {
4440 int err;
4441 int index = vhcr->in_modifier & 0xffff;
4442
4443 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4444 if (err)
4445 return err;
4446
4447 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4448 put_res(dev, slave, index, RES_COUNTER);
4449 return err;
4450 }
4451
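/*
 * Detach all multicast/steering entries still recorded for a QP when its
 * owning slave is being cleaned up.
 */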
4452 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4453 {
4454 struct res_gid *rgid;
4455 struct res_gid *tmp;
4456 struct mlx4_qp qp; /* dummy for calling attach/detach */
4457
4458 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4459 switch (dev->caps.steering_mode) {
4460 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4461 mlx4_flow_detach(dev, rgid->reg_id);
4462 break;
4463 case MLX4_STEERING_MODE_B0:
4464 qp.qpn = rqp->local_qpn;
4465 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4466 rgid->prot, rgid->steer);
4467 break;
4468 }
4469 list_del(&rgid->list);
4470 kfree(rgid);
4471 }
4472 }
4473
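/*
 * Mark every resource of the given type owned by the slave as busy and
 * flag it for removal; returns the number of entries that were already
 * busy and therefore could not be claimed on this pass.
 */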
4474 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4475 enum mlx4_resource type, int print)
4476 {
4477 struct mlx4_priv *priv = mlx4_priv(dev);
4478 struct mlx4_resource_tracker *tracker =
4479 &priv->mfunc.master.res_tracker;
4480 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4481 struct res_common *r;
4482 struct res_common *tmp;
4483 int busy;
4484
4485 busy = 0;
4486 spin_lock_irq(mlx4_tlock(dev));
4487 list_for_each_entry_safe(r, tmp, rlist, list) {
4488 if (r->owner == slave) {
4489 if (!r->removing) {
4490 if (r->state == RES_ANY_BUSY) {
4491 if (print)
4492 mlx4_dbg(dev,
4493 "%s id 0x%llx is busy\n",
4494 resource_str(type),
4495 r->res_id);
4496 ++busy;
4497 } else {
4498 r->from_state = r->state;
4499 r->state = RES_ANY_BUSY;
4500 r->removing = 1;
4501 }
4502 }
4503 }
4504 }
4505 spin_unlock_irq(mlx4_tlock(dev));
4506
4507 return busy;
4508 }
4509
4510 static int move_all_busy(struct mlx4_dev *dev, int slave,
4511 enum mlx4_resource type)
4512 {
4513 unsigned long begin;
4514 int busy;
4515
4516 begin = jiffies;
4517 do {
4518 busy = _move_all_busy(dev, slave, type, 0);
4519 if (time_after(jiffies, begin + 5 * HZ))
4520 break;
4521 if (busy)
4522 cond_resched();
4523 } while (busy);
4524
4525 if (busy)
4526 busy = _move_all_busy(dev, slave, type, 1);
4527
4528 return busy;
4529 }
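
/*
 * Release all QPs owned by a departing slave, walking each one backwards
 * through its tracker states (HW -> MAPPED -> RESERVED) and undoing the
 * corresponding firmware, ICM and range allocations.
 */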
4530 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4531 {
4532 struct mlx4_priv *priv = mlx4_priv(dev);
4533 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4534 struct list_head *qp_list =
4535 &tracker->slave_list[slave].res_list[RES_QP];
4536 struct res_qp *qp;
4537 struct res_qp *tmp;
4538 int state;
4539 u64 in_param;
4540 int qpn;
4541 int err;
4542
4543 err = move_all_busy(dev, slave, RES_QP);
4544 if (err)
4545 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4546 slave);
4547
4548 spin_lock_irq(mlx4_tlock(dev));
4549 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4550 spin_unlock_irq(mlx4_tlock(dev));
4551 if (qp->com.owner == slave) {
4552 qpn = qp->com.res_id;
4553 detach_qp(dev, slave, qp);
4554 state = qp->com.from_state;
4555 while (state != 0) {
4556 switch (state) {
4557 case RES_QP_RESERVED:
4558 spin_lock_irq(mlx4_tlock(dev));
4559 rb_erase(&qp->com.node,
4560 &tracker->res_tree[RES_QP]);
4561 list_del(&qp->com.list);
4562 spin_unlock_irq(mlx4_tlock(dev));
4563 if (!valid_reserved(dev, slave, qpn)) {
4564 __mlx4_qp_release_range(dev, qpn, 1);
4565 mlx4_release_resource(dev, slave,
4566 RES_QP, 1, 0);
4567 }
4568 kfree(qp);
4569 state = 0;
4570 break;
4571 case RES_QP_MAPPED:
4572 if (!valid_reserved(dev, slave, qpn))
4573 __mlx4_qp_free_icm(dev, qpn);
4574 state = RES_QP_RESERVED;
4575 break;
4576 case RES_QP_HW:
4577 in_param = slave;
4578 err = mlx4_cmd(dev, in_param,
4579 qp->local_qpn, 2,
4580 MLX4_CMD_2RST_QP,
4581 MLX4_CMD_TIME_CLASS_A,
4582 MLX4_CMD_NATIVE);
4583 if (err)
4584 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4585 slave, qp->local_qpn);
4586 atomic_dec(&qp->rcq->ref_count);
4587 atomic_dec(&qp->scq->ref_count);
4588 atomic_dec(&qp->mtt->ref_count);
4589 if (qp->srq)
4590 atomic_dec(&qp->srq->ref_count);
4591 state = RES_QP_MAPPED;
4592 break;
4593 default:
4594 state = 0;
4595 }
4596 }
4597 }
4598 spin_lock_irq(mlx4_tlock(dev));
4599 }
4600 spin_unlock_irq(mlx4_tlock(dev));
4601 }
4602
4603 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4604 {
4605 struct mlx4_priv *priv = mlx4_priv(dev);
4606 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4607 struct list_head *srq_list =
4608 &tracker->slave_list[slave].res_list[RES_SRQ];
4609 struct res_srq *srq;
4610 struct res_srq *tmp;
4611 int state;
4612 u64 in_param;
4613 LIST_HEAD(tlist);
4614 int srqn;
4615 int err;
4616
4617 err = move_all_busy(dev, slave, RES_SRQ);
4618 if (err)
4619 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4620 slave);
4621
4622 spin_lock_irq(mlx4_tlock(dev));
4623 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4624 spin_unlock_irq(mlx4_tlock(dev));
4625 if (srq->com.owner == slave) {
4626 srqn = srq->com.res_id;
4627 state = srq->com.from_state;
4628 while (state != 0) {
4629 switch (state) {
4630 case RES_SRQ_ALLOCATED:
4631 __mlx4_srq_free_icm(dev, srqn);
4632 spin_lock_irq(mlx4_tlock(dev));
4633 rb_erase(&srq->com.node,
4634 &tracker->res_tree[RES_SRQ]);
4635 list_del(&srq->com.list);
4636 spin_unlock_irq(mlx4_tlock(dev));
4637 mlx4_release_resource(dev, slave,
4638 RES_SRQ, 1, 0);
4639 kfree(srq);
4640 state = 0;
4641 break;
4642
4643 case RES_SRQ_HW:
4644 in_param = slave;
4645 err = mlx4_cmd(dev, in_param, srqn, 1,
4646 MLX4_CMD_HW2SW_SRQ,
4647 MLX4_CMD_TIME_CLASS_A,
4648 MLX4_CMD_NATIVE);
4649 if (err)
4650 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4651 slave, srqn);
4652
4653 atomic_dec(&srq->mtt->ref_count);
4654 if (srq->cq)
4655 atomic_dec(&srq->cq->ref_count);
4656 state = RES_SRQ_ALLOCATED;
4657 break;
4658
4659 default:
4660 state = 0;
4661 }
4662 }
4663 }
4664 spin_lock_irq(mlx4_tlock(dev));
4665 }
4666 spin_unlock_irq(mlx4_tlock(dev));
4667 }
4668
4669 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4670 {
4671 struct mlx4_priv *priv = mlx4_priv(dev);
4672 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4673 struct list_head *cq_list =
4674 &tracker->slave_list[slave].res_list[RES_CQ];
4675 struct res_cq *cq;
4676 struct res_cq *tmp;
4677 int state;
4678 u64 in_param;
4679 LIST_HEAD(tlist);
4680 int cqn;
4681 int err;
4682
4683 err = move_all_busy(dev, slave, RES_CQ);
4684 if (err)
4685 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4686 slave);
4687
4688 spin_lock_irq(mlx4_tlock(dev));
4689 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4690 spin_unlock_irq(mlx4_tlock(dev));
4691 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4692 cqn = cq->com.res_id;
4693 state = cq->com.from_state;
4694 while (state != 0) {
4695 switch (state) {
4696 case RES_CQ_ALLOCATED:
4697 __mlx4_cq_free_icm(dev, cqn);
4698 spin_lock_irq(mlx4_tlock(dev));
4699 rb_erase(&cq->com.node,
4700 &tracker->res_tree[RES_CQ]);
4701 list_del(&cq->com.list);
4702 spin_unlock_irq(mlx4_tlock(dev));
4703 mlx4_release_resource(dev, slave,
4704 RES_CQ, 1, 0);
4705 kfree(cq);
4706 state = 0;
4707 break;
4708
4709 case RES_CQ_HW:
4710 in_param = slave;
4711 err = mlx4_cmd(dev, in_param, cqn, 1,
4712 MLX4_CMD_HW2SW_CQ,
4713 MLX4_CMD_TIME_CLASS_A,
4714 MLX4_CMD_NATIVE);
4715 if (err)
4716 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4717 slave, cqn);
4718 atomic_dec(&cq->mtt->ref_count);
4719 state = RES_CQ_ALLOCATED;
4720 break;
4721
4722 default:
4723 state = 0;
4724 }
4725 }
4726 }
4727 spin_lock_irq(mlx4_tlock(dev));
4728 }
4729 spin_unlock_irq(mlx4_tlock(dev));
4730 }
4731
4732 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4733 {
4734 struct mlx4_priv *priv = mlx4_priv(dev);
4735 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4736 struct list_head *mpt_list =
4737 &tracker->slave_list[slave].res_list[RES_MPT];
4738 struct res_mpt *mpt;
4739 struct res_mpt *tmp;
4740 int state;
4741 u64 in_param;
4742 LIST_HEAD(tlist);
4743 int mptn;
4744 int err;
4745
4746 err = move_all_busy(dev, slave, RES_MPT);
4747 if (err)
4748 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4749 slave);
4750
4751 spin_lock_irq(mlx4_tlock(dev));
4752 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4753 spin_unlock_irq(mlx4_tlock(dev));
4754 if (mpt->com.owner == slave) {
4755 mptn = mpt->com.res_id;
4756 state = mpt->com.from_state;
4757 while (state != 0) {
4758 switch (state) {
4759 case RES_MPT_RESERVED:
4760 __mlx4_mpt_release(dev, mpt->key);
4761 spin_lock_irq(mlx4_tlock(dev));
4762 rb_erase(&mpt->com.node,
4763 &tracker->res_tree[RES_MPT]);
4764 list_del(&mpt->com.list);
4765 spin_unlock_irq(mlx4_tlock(dev));
4766 mlx4_release_resource(dev, slave,
4767 RES_MPT, 1, 0);
4768 kfree(mpt);
4769 state = 0;
4770 break;
4771
4772 case RES_MPT_MAPPED:
4773 __mlx4_mpt_free_icm(dev, mpt->key);
4774 state = RES_MPT_RESERVED;
4775 break;
4776
4777 case RES_MPT_HW:
4778 in_param = slave;
4779 err = mlx4_cmd(dev, in_param, mptn, 0,
4780 MLX4_CMD_HW2SW_MPT,
4781 MLX4_CMD_TIME_CLASS_A,
4782 MLX4_CMD_NATIVE);
4783 if (err)
4784 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4785 slave, mptn);
4786 if (mpt->mtt)
4787 atomic_dec(&mpt->mtt->ref_count);
4788 state = RES_MPT_MAPPED;
4789 break;
4790 default:
4791 state = 0;
4792 }
4793 }
4794 }
4795 spin_lock_irq(mlx4_tlock(dev));
4796 }
4797 spin_unlock_irq(mlx4_tlock(dev));
4798 }
4799
4800 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4801 {
4802 struct mlx4_priv *priv = mlx4_priv(dev);
4803 struct mlx4_resource_tracker *tracker =
4804 &priv->mfunc.master.res_tracker;
4805 struct list_head *mtt_list =
4806 &tracker->slave_list[slave].res_list[RES_MTT];
4807 struct res_mtt *mtt;
4808 struct res_mtt *tmp;
4809 int state;
4810 LIST_HEAD(tlist);
4811 int base;
4812 int err;
4813
4814 err = move_all_busy(dev, slave, RES_MTT);
4815 if (err)
4816 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4817 slave);
4818
4819 spin_lock_irq(mlx4_tlock(dev));
4820 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4821 spin_unlock_irq(mlx4_tlock(dev));
4822 if (mtt->com.owner == slave) {
4823 base = mtt->com.res_id;
4824 state = mtt->com.from_state;
4825 while (state != 0) {
4826 switch (state) {
4827 case RES_MTT_ALLOCATED:
4828 __mlx4_free_mtt_range(dev, base,
4829 mtt->order);
4830 spin_lock_irq(mlx4_tlock(dev));
4831 rb_erase(&mtt->com.node,
4832 &tracker->res_tree[RES_MTT]);
4833 list_del(&mtt->com.list);
4834 spin_unlock_irq(mlx4_tlock(dev));
4835 mlx4_release_resource(dev, slave, RES_MTT,
4836 1 << mtt->order, 0);
4837 kfree(mtt);
4838 state = 0;
4839 break;
4840
4841 default:
4842 state = 0;
4843 }
4844 }
4845 }
4846 spin_lock_irq(mlx4_tlock(dev));
4847 }
4848 spin_unlock_irq(mlx4_tlock(dev));
4849 }
4850
4851 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4852 {
4853 struct mlx4_priv *priv = mlx4_priv(dev);
4854 struct mlx4_resource_tracker *tracker =
4855 &priv->mfunc.master.res_tracker;
4856 struct list_head *fs_rule_list =
4857 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4858 struct res_fs_rule *fs_rule;
4859 struct res_fs_rule *tmp;
4860 int state;
4861 u64 base;
4862 int err;
4863
4864 err = move_all_busy(dev, slave, RES_FS_RULE);
4865 if (err)
4866 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
4867 slave);
4868
4869 spin_lock_irq(mlx4_tlock(dev));
4870 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4871 spin_unlock_irq(mlx4_tlock(dev));
4872 if (fs_rule->com.owner == slave) {
4873 base = fs_rule->com.res_id;
4874 state = fs_rule->com.from_state;
4875 while (state != 0) {
4876 switch (state) {
4877 case RES_FS_RULE_ALLOCATED:
4878 /* detach rule */
4879 err = mlx4_cmd(dev, base, 0, 0,
4880 MLX4_QP_FLOW_STEERING_DETACH,
4881 MLX4_CMD_TIME_CLASS_A,
4882 MLX4_CMD_NATIVE);
4883
4884 spin_lock_irq(mlx4_tlock(dev));
4885 rb_erase(&fs_rule->com.node,
4886 &tracker->res_tree[RES_FS_RULE]);
4887 list_del(&fs_rule->com.list);
4888 spin_unlock_irq(mlx4_tlock(dev));
4889 kfree(fs_rule);
4890 state = 0;
4891 break;
4892
4893 default:
4894 state = 0;
4895 }
4896 }
4897 }
4898 spin_lock_irq(mlx4_tlock(dev));
4899 }
4900 spin_unlock_irq(mlx4_tlock(dev));
4901 }
4902
4903 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4904 {
4905 struct mlx4_priv *priv = mlx4_priv(dev);
4906 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4907 struct list_head *eq_list =
4908 &tracker->slave_list[slave].res_list[RES_EQ];
4909 struct res_eq *eq;
4910 struct res_eq *tmp;
4911 int err;
4912 int state;
4913 LIST_HEAD(tlist);
4914 int eqn;
4915
4916 err = move_all_busy(dev, slave, RES_EQ);
4917 if (err)
4918 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4919 slave);
4920
4921 spin_lock_irq(mlx4_tlock(dev));
4922 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4923 spin_unlock_irq(mlx4_tlock(dev));
4924 if (eq->com.owner == slave) {
4925 eqn = eq->com.res_id;
4926 state = eq->com.from_state;
4927 while (state != 0) {
4928 switch (state) {
4929 case RES_EQ_RESERVED:
4930 spin_lock_irq(mlx4_tlock(dev));
4931 rb_erase(&eq->com.node,
4932 &tracker->res_tree[RES_EQ]);
4933 list_del(&eq->com.list);
4934 spin_unlock_irq(mlx4_tlock(dev));
4935 kfree(eq);
4936 state = 0;
4937 break;
4938
4939 case RES_EQ_HW:
4940 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4941 1, MLX4_CMD_HW2SW_EQ,
4942 MLX4_CMD_TIME_CLASS_A,
4943 MLX4_CMD_NATIVE);
4944 if (err)
4945 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4946 slave, eqn & 0x3ff);
4947 atomic_dec(&eq->mtt->ref_count);
4948 state = RES_EQ_RESERVED;
4949 break;
4950
4951 default:
4952 state = 0;
4953 }
4954 }
4955 }
4956 spin_lock_irq(mlx4_tlock(dev));
4957 }
4958 spin_unlock_irq(mlx4_tlock(dev));
4959 }
4960
4961 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4962 {
4963 struct mlx4_priv *priv = mlx4_priv(dev);
4964 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4965 struct list_head *counter_list =
4966 &tracker->slave_list[slave].res_list[RES_COUNTER];
4967 struct res_counter *counter;
4968 struct res_counter *tmp;
4969 int err;
4970 int *counters_arr = NULL;
4971 int i, j;
4972
4973 err = move_all_busy(dev, slave, RES_COUNTER);
4974 if (err)
4975 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4976 slave);
4977
4978 counters_arr = kmalloc_array(dev->caps.max_counters,
4979 sizeof(*counters_arr), GFP_KERNEL);
4980 if (!counters_arr)
4981 return;
4982
4983 do {
4984 i = 0;
4985 j = 0;
4986 spin_lock_irq(mlx4_tlock(dev));
4987 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4988 if (counter->com.owner == slave) {
4989 counters_arr[i++] = counter->com.res_id;
4990 rb_erase(&counter->com.node,
4991 &tracker->res_tree[RES_COUNTER]);
4992 list_del(&counter->com.list);
4993 kfree(counter);
4994 }
4995 }
4996 spin_unlock_irq(mlx4_tlock(dev));
4997
4998 while (j < i) {
4999 __mlx4_counter_free(dev, counters_arr[j++]);
5000 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5001 }
5002 } while (i);
5003
5004 kfree(counters_arr);
5005 }
5006
5007 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5008 {
5009 struct mlx4_priv *priv = mlx4_priv(dev);
5010 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5011 struct list_head *xrcdn_list =
5012 &tracker->slave_list[slave].res_list[RES_XRCD];
5013 struct res_xrcdn *xrcd;
5014 struct res_xrcdn *tmp;
5015 int err;
5016 int xrcdn;
5017
5018 err = move_all_busy(dev, slave, RES_XRCD);
5019 if (err)
5020 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5021 slave);
5022
5023 spin_lock_irq(mlx4_tlock(dev));
5024 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5025 if (xrcd->com.owner == slave) {
5026 xrcdn = xrcd->com.res_id;
5027 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5028 list_del(&xrcd->com.list);
5029 kfree(xrcd);
5030 __mlx4_xrcd_free(dev, xrcdn);
5031 }
5032 }
5033 spin_unlock_irq(mlx4_tlock(dev));
5034 }
5035
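/*
 * Full cleanup of everything a slave owns, in dependency order: VLANs and
 * MACs first, then flow-steering rules and QPs, followed by SRQs, CQs,
 * MRs, EQs, MTTs, counters and XRC domains.
 */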
5036 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5037 {
5038 struct mlx4_priv *priv = mlx4_priv(dev);
5039 mlx4_reset_roce_gids(dev, slave);
5040 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5041 rem_slave_vlans(dev, slave);
5042 rem_slave_macs(dev, slave);
5043 rem_slave_fs_rule(dev, slave);
5044 rem_slave_qps(dev, slave);
5045 rem_slave_srqs(dev, slave);
5046 rem_slave_cqs(dev, slave);
5047 rem_slave_mrs(dev, slave);
5048 rem_slave_eqs(dev, slave);
5049 rem_slave_mtts(dev, slave);
5050 rem_slave_counters(dev, slave);
5051 rem_slave_xrcdns(dev, slave);
5052 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5053 }
5054
5055 static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5056 struct mlx4_vf_immed_vlan_work *work)
5057 {
5058 ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5059 ctx->qp_context.qos_vport = work->qos_vport;
5060 }
5061
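/*
 * Work handler that pushes an immediate VST vlan/QoS change to all of a
 * slave's Ethernet QPs via UPDATE_QP. For MLX4_VGT the saved pre-VST path
 * parameters are restored; otherwise the new vlan index, vlan_control and
 * scheduling-queue priority are forced into each QP context.
 */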
5062 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5063 {
5064 struct mlx4_vf_immed_vlan_work *work =
5065 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5066 struct mlx4_cmd_mailbox *mailbox;
5067 struct mlx4_update_qp_context *upd_context;
5068 struct mlx4_dev *dev = &work->priv->dev;
5069 struct mlx4_resource_tracker *tracker =
5070 &work->priv->mfunc.master.res_tracker;
5071 struct list_head *qp_list =
5072 &tracker->slave_list[work->slave].res_list[RES_QP];
5073 struct res_qp *qp;
5074 struct res_qp *tmp;
5075 u64 qp_path_mask_vlan_ctrl =
5076 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5077 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5078 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5079 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5080 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5081 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5082
5083 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5084 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5085 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5086 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5087 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5088 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5089 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5090
5091 int err;
5092 int port, errors = 0;
5093 u8 vlan_control;
5094
5095 if (mlx4_is_slave(dev)) {
5096 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5097 work->slave);
5098 goto out;
5099 }
5100
5101 mailbox = mlx4_alloc_cmd_mailbox(dev);
5102 if (IS_ERR(mailbox))
5103 goto out;
5104 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5105 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5106 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5107 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5108 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5109 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5110 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5111 else if (!work->vlan_id)
5112 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5113 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5114 else
5115 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5116 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5117 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5118
5119 upd_context = mailbox->buf;
5120 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5121
5122 spin_lock_irq(mlx4_tlock(dev));
5123 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5124 spin_unlock_irq(mlx4_tlock(dev));
5125 if (qp->com.owner == work->slave) {
5126 if (qp->com.from_state != RES_QP_HW ||
5127 !qp->sched_queue || /* no INIT2RTR trans yet */
5128 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5129 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5130 spin_lock_irq(mlx4_tlock(dev));
5131 continue;
5132 }
5133 port = (qp->sched_queue >> 6 & 1) + 1;
5134 if (port != work->port) {
5135 spin_lock_irq(mlx4_tlock(dev));
5136 continue;
5137 }
5138 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5139 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5140 else
5141 upd_context->primary_addr_path_mask =
5142 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5143 if (work->vlan_id == MLX4_VGT) {
5144 upd_context->qp_context.param3 = qp->param3;
5145 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5146 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5147 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5148 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5149 upd_context->qp_context.pri_path.feup = qp->feup;
5150 upd_context->qp_context.pri_path.sched_queue =
5151 qp->sched_queue;
5152 } else {
5153 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5154 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5155 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5156 upd_context->qp_context.pri_path.fvl_rx =
5157 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5158 upd_context->qp_context.pri_path.fl =
5159 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
5160 upd_context->qp_context.pri_path.feup =
5161 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5162 upd_context->qp_context.pri_path.sched_queue =
5163 qp->sched_queue & 0xC7;
5164 upd_context->qp_context.pri_path.sched_queue |=
5165 ((work->qos & 0x7) << 3);
5166
5167 if (dev->caps.flags2 &
5168 MLX4_DEV_CAP_FLAG2_QOS_VPP)
5169 update_qos_vpp(upd_context, work);
5170 }
5171
5172 err = mlx4_cmd(dev, mailbox->dma,
5173 qp->local_qpn & 0xffffff,
5174 0, MLX4_CMD_UPDATE_QP,
5175 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5176 if (err) {
5177 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5178 work->slave, port, qp->local_qpn, err);
5179 errors++;
5180 }
5181 }
5182 spin_lock_irq(mlx4_tlock(dev));
5183 }
5184 spin_unlock_irq(mlx4_tlock(dev));
5185 mlx4_free_cmd_mailbox(dev, mailbox);
5186
5187 if (errors)
5188 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5189 errors, work->slave, work->port);
5190
5191 /* unregister previous vlan_id if needed and we had no errors
5192 * while updating the QPs
5193 */
5194 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5195 NO_INDX != work->orig_vlan_ix)
5196 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5197 work->orig_vlan_id);
5198 out:
5199 kfree(work);
5200 return;
5201 }
5202