1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46 
47 #include "mlx4.h"
48 #include "fw.h"
49 
50 #define MLX4_MAC_VALID		(1ull << 63)
51 
52 struct mac_res {
53 	struct list_head list;
54 	u64 mac;
55 	int ref_count;
56 	u8 smac_index;
57 	u8 port;
58 };
59 
60 struct vlan_res {
61 	struct list_head list;
62 	u16 vlan;
63 	int ref_count;
64 	int vlan_index;
65 	u8 port;
66 };
67 
68 struct res_common {
69 	struct list_head	list;
70 	struct rb_node		node;
71 	u64		        res_id;
72 	int			owner;
73 	int			state;
74 	int			from_state;
75 	int			to_state;
76 	int			removing;
77 };
78 
79 enum {
80 	RES_ANY_BUSY = 1
81 };
82 
83 struct res_gid {
84 	struct list_head	list;
85 	u8			gid[16];
86 	enum mlx4_protocol	prot;
87 	enum mlx4_steer_type	steer;
88 	u64			reg_id;
89 };
90 
91 enum res_qp_states {
92 	RES_QP_BUSY = RES_ANY_BUSY,
93 
94 	/* QP number was allocated */
95 	RES_QP_RESERVED,
96 
97 	/* ICM memory for QP context was mapped */
98 	RES_QP_MAPPED,
99 
100 	/* QP is in hw ownership */
101 	RES_QP_HW
102 };
103 
104 struct res_qp {
105 	struct res_common	com;
106 	struct res_mtt	       *mtt;
107 	struct res_cq	       *rcq;
108 	struct res_cq	       *scq;
109 	struct res_srq	       *srq;
110 	struct list_head	mcg_list;
111 	spinlock_t		mcg_spl;
112 	int			local_qpn;
113 	atomic_t		ref_count;
114 	u32			qpc_flags;
115 	/* saved qp params before VST enforcement in order to restore on VGT */
116 	u8			sched_queue;
117 	__be32			param3;
118 	u8			vlan_control;
119 	u8			fvl_rx;
120 	u8			pri_path_fl;
121 	u8			vlan_index;
122 	u8			feup;
123 };
124 
125 enum res_mtt_states {
126 	RES_MTT_BUSY = RES_ANY_BUSY,
127 	RES_MTT_ALLOCATED,
128 };
129 
130 static inline const char *mtt_states_str(enum res_mtt_states state)
131 {
132 	switch (state) {
133 	case RES_MTT_BUSY: return "RES_MTT_BUSY";
134 	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
135 	default: return "Unknown";
136 	}
137 }
138 
139 struct res_mtt {
140 	struct res_common	com;
141 	int			order;
142 	atomic_t		ref_count;
143 };
144 
145 enum res_mpt_states {
146 	RES_MPT_BUSY = RES_ANY_BUSY,
147 	RES_MPT_RESERVED,
148 	RES_MPT_MAPPED,
149 	RES_MPT_HW,
150 };
151 
152 struct res_mpt {
153 	struct res_common	com;
154 	struct res_mtt	       *mtt;
155 	int			key;
156 };
157 
158 enum res_eq_states {
159 	RES_EQ_BUSY = RES_ANY_BUSY,
160 	RES_EQ_RESERVED,
161 	RES_EQ_HW,
162 };
163 
164 struct res_eq {
165 	struct res_common	com;
166 	struct res_mtt	       *mtt;
167 };
168 
169 enum res_cq_states {
170 	RES_CQ_BUSY = RES_ANY_BUSY,
171 	RES_CQ_ALLOCATED,
172 	RES_CQ_HW,
173 };
174 
175 struct res_cq {
176 	struct res_common	com;
177 	struct res_mtt	       *mtt;
178 	atomic_t		ref_count;
179 };
180 
181 enum res_srq_states {
182 	RES_SRQ_BUSY = RES_ANY_BUSY,
183 	RES_SRQ_ALLOCATED,
184 	RES_SRQ_HW,
185 };
186 
187 struct res_srq {
188 	struct res_common	com;
189 	struct res_mtt	       *mtt;
190 	struct res_cq	       *cq;
191 	atomic_t		ref_count;
192 };
193 
194 enum res_counter_states {
195 	RES_COUNTER_BUSY = RES_ANY_BUSY,
196 	RES_COUNTER_ALLOCATED,
197 };
198 
199 struct res_counter {
200 	struct res_common	com;
201 	int			port;
202 };
203 
204 enum res_xrcdn_states {
205 	RES_XRCD_BUSY = RES_ANY_BUSY,
206 	RES_XRCD_ALLOCATED,
207 };
208 
209 struct res_xrcdn {
210 	struct res_common	com;
211 	int			port;
212 };
213 
214 enum res_fs_rule_states {
215 	RES_FS_RULE_BUSY = RES_ANY_BUSY,
216 	RES_FS_RULE_ALLOCATED,
217 };
218 
219 struct res_fs_rule {
220 	struct res_common	com;
221 	int			qpn;
222 };
223 
224 static int mlx4_is_eth(struct mlx4_dev *dev, int port)
225 {
226 	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
227 }
228 
229 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
230 {
231 	struct rb_node *node = root->rb_node;
232 
233 	while (node) {
234 		struct res_common *res = container_of(node, struct res_common,
235 						      node);
236 
237 		if (res_id < res->res_id)
238 			node = node->rb_left;
239 		else if (res_id > res->res_id)
240 			node = node->rb_right;
241 		else
242 			return res;
243 	}
244 	return NULL;
245 }
246 
247 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
248 {
249 	struct rb_node **new = &(root->rb_node), *parent = NULL;
250 
251 	/* Figure out where to put new node */
252 	while (*new) {
253 		struct res_common *this = container_of(*new, struct res_common,
254 						       node);
255 
256 		parent = *new;
257 		if (res->res_id < this->res_id)
258 			new = &((*new)->rb_left);
259 		else if (res->res_id > this->res_id)
260 			new = &((*new)->rb_right);
261 		else
262 			return -EEXIST;
263 	}
264 
265 	/* Add new node and rebalance tree. */
266 	rb_link_node(&res->node, parent, new);
267 	rb_insert_color(&res->node, root);
268 
269 	return 0;
270 }
271 
272 enum qp_transition {
273 	QP_TRANS_INIT2RTR,
274 	QP_TRANS_RTR2RTS,
275 	QP_TRANS_RTS2RTS,
276 	QP_TRANS_SQERR2RTS,
277 	QP_TRANS_SQD2SQD,
278 	QP_TRANS_SQD2RTS
279 };
280 
281 /* For Debug uses */
282 static const char *resource_str(enum mlx4_resource rt)
283 {
284 	switch (rt) {
285 	case RES_QP: return "RES_QP";
286 	case RES_CQ: return "RES_CQ";
287 	case RES_SRQ: return "RES_SRQ";
288 	case RES_MPT: return "RES_MPT";
289 	case RES_MTT: return "RES_MTT";
290 	case RES_MAC: return  "RES_MAC";
291 	case RES_VLAN: return  "RES_VLAN";
292 	case RES_EQ: return "RES_EQ";
293 	case RES_COUNTER: return "RES_COUNTER";
294 	case RES_FS_RULE: return "RES_FS_RULE";
295 	case RES_XRCD: return "RES_XRCD";
296 	default: return "Unknown resource type !!!";
297 	};
298 }
299 
300 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
301 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
302 				      enum mlx4_resource res_type, int count,
303 				      int port)
304 {
305 	struct mlx4_priv *priv = mlx4_priv(dev);
306 	struct resource_allocator *res_alloc =
307 		&priv->mfunc.master.res_tracker.res_alloc[res_type];
308 	int err = -EINVAL;
309 	int allocated, free, reserved, guaranteed, from_free;
310 	int from_rsvd;
311 
312 	if (slave > dev->num_vfs)
313 		return -EINVAL;
314 
315 	spin_lock(&res_alloc->alloc_lock);
316 	allocated = (port > 0) ?
317 		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
318 		res_alloc->allocated[slave];
319 	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
320 		res_alloc->res_free;
321 	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
322 		res_alloc->res_reserved;
323 	guaranteed = res_alloc->guaranteed[slave];
324 
325 	if (allocated + count > res_alloc->quota[slave]) {
326 		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
327 			  slave, port, resource_str(res_type), count,
328 			  allocated, res_alloc->quota[slave]);
329 		goto out;
330 	}
331 
332 	if (allocated + count <= guaranteed) {
333 		err = 0;
334 		from_rsvd = count;
335 	} else {
336 		/* portion may need to be obtained from free area */
337 		if (guaranteed - allocated > 0)
338 			from_free = count - (guaranteed - allocated);
339 		else
340 			from_free = count;
341 
342 		from_rsvd = count - from_free;
343 
344 		if (free - from_free >= reserved)
345 			err = 0;
346 		else
347 			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
348 				  slave, port, resource_str(res_type), free,
349 				  from_free, reserved);
350 	}
351 
352 	if (!err) {
353 		/* grant the request */
354 		if (port > 0) {
355 			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
356 			res_alloc->res_port_free[port - 1] -= count;
357 			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
358 		} else {
359 			res_alloc->allocated[slave] += count;
360 			res_alloc->res_free -= count;
361 			res_alloc->res_reserved -= from_rsvd;
362 		}
363 	}
364 
365 out:
366 	spin_unlock(&res_alloc->alloc_lock);
367 	return err;
368 }
369 
370 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
371 				    enum mlx4_resource res_type, int count,
372 				    int port)
373 {
374 	struct mlx4_priv *priv = mlx4_priv(dev);
375 	struct resource_allocator *res_alloc =
376 		&priv->mfunc.master.res_tracker.res_alloc[res_type];
377 	int allocated, guaranteed, from_rsvd;
378 
379 	if (slave > dev->num_vfs)
380 		return;
381 
382 	spin_lock(&res_alloc->alloc_lock);
383 
384 	allocated = (port > 0) ?
385 		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
386 		res_alloc->allocated[slave];
387 	guaranteed = res_alloc->guaranteed[slave];
388 
389 	if (allocated - count >= guaranteed) {
390 		from_rsvd = 0;
391 	} else {
392 		/* portion may need to be returned to reserved area */
393 		if (allocated - guaranteed > 0)
394 			from_rsvd = count - (allocated - guaranteed);
395 		else
396 			from_rsvd = count;
397 	}
398 
399 	if (port > 0) {
400 		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
401 		res_alloc->res_port_free[port - 1] += count;
402 		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
403 	} else {
404 		res_alloc->allocated[slave] -= count;
405 		res_alloc->res_free += count;
406 		res_alloc->res_reserved += from_rsvd;
407 	}
408 
409 	spin_unlock(&res_alloc->alloc_lock);
410 	return;
411 }
412 
413 static inline void initialize_res_quotas(struct mlx4_dev *dev,
414 					 struct resource_allocator *res_alloc,
415 					 enum mlx4_resource res_type,
416 					 int vf, int num_instances)
417 {
418 	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
419 	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
420 	if (vf == mlx4_master_func_num(dev)) {
421 		res_alloc->res_free = num_instances;
422 		if (res_type == RES_MTT) {
423 			/* reserved mtts will be taken out of the PF allocation */
424 			res_alloc->res_free += dev->caps.reserved_mtts;
425 			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
426 			res_alloc->quota[vf] += dev->caps.reserved_mtts;
427 		}
428 	}
429 }
430 
431 void mlx4_init_quotas(struct mlx4_dev *dev)
432 {
433 	struct mlx4_priv *priv = mlx4_priv(dev);
434 	int pf;
435 
436 	/* quotas for VFs are initialized in mlx4_slave_cap */
437 	if (mlx4_is_slave(dev))
438 		return;
439 
440 	if (!mlx4_is_mfunc(dev)) {
441 		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
442 			mlx4_num_reserved_sqps(dev);
443 		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
444 		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
445 		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
446 		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
447 		return;
448 	}
449 
450 	pf = mlx4_master_func_num(dev);
451 	dev->quotas.qp =
452 		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
453 	dev->quotas.cq =
454 		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
455 	dev->quotas.srq =
456 		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
457 	dev->quotas.mtt =
458 		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
459 	dev->quotas.mpt =
460 		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
461 }
462 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
463 {
464 	struct mlx4_priv *priv = mlx4_priv(dev);
465 	int i, j;
466 	int t;
467 
468 	priv->mfunc.master.res_tracker.slave_list =
469 		kzalloc(dev->num_slaves * sizeof(struct slave_list),
470 			GFP_KERNEL);
471 	if (!priv->mfunc.master.res_tracker.slave_list)
472 		return -ENOMEM;
473 
474 	for (i = 0 ; i < dev->num_slaves; i++) {
475 		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
476 			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
477 				       slave_list[i].res_list[t]);
478 		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
479 	}
480 
481 	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
482 		 dev->num_slaves);
483 	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
484 		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
485 
486 	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
487 		struct resource_allocator *res_alloc =
488 			&priv->mfunc.master.res_tracker.res_alloc[i];
489 		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
490 		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
491 		if (i == RES_MAC || i == RES_VLAN)
492 			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
493 						       (dev->num_vfs + 1) * sizeof(int),
494 							GFP_KERNEL);
495 		else
496 			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
497 
498 		if (!res_alloc->quota || !res_alloc->guaranteed ||
499 		    !res_alloc->allocated)
500 			goto no_mem_err;
501 
502 		spin_lock_init(&res_alloc->alloc_lock);
503 		for (t = 0; t < dev->num_vfs + 1; t++) {
504 			struct mlx4_active_ports actv_ports =
505 				mlx4_get_active_ports(dev, t);
506 			switch (i) {
507 			case RES_QP:
508 				initialize_res_quotas(dev, res_alloc, RES_QP,
509 						      t, dev->caps.num_qps -
510 						      dev->caps.reserved_qps -
511 						      mlx4_num_reserved_sqps(dev));
512 				break;
513 			case RES_CQ:
514 				initialize_res_quotas(dev, res_alloc, RES_CQ,
515 						      t, dev->caps.num_cqs -
516 						      dev->caps.reserved_cqs);
517 				break;
518 			case RES_SRQ:
519 				initialize_res_quotas(dev, res_alloc, RES_SRQ,
520 						      t, dev->caps.num_srqs -
521 						      dev->caps.reserved_srqs);
522 				break;
523 			case RES_MPT:
524 				initialize_res_quotas(dev, res_alloc, RES_MPT,
525 						      t, dev->caps.num_mpts -
526 						      dev->caps.reserved_mrws);
527 				break;
528 			case RES_MTT:
529 				initialize_res_quotas(dev, res_alloc, RES_MTT,
530 						      t, dev->caps.num_mtts -
531 						      dev->caps.reserved_mtts);
532 				break;
533 			case RES_MAC:
534 				if (t == mlx4_master_func_num(dev)) {
535 					int max_vfs_pport = 0;
536 					/* Calculate the max vfs per port for */
537 					/* both ports.			      */
538 					for (j = 0; j < dev->caps.num_ports;
539 					     j++) {
540 						struct mlx4_slaves_pport slaves_pport =
541 							mlx4_phys_to_slaves_pport(dev, j + 1);
542 						unsigned current_slaves =
543 							bitmap_weight(slaves_pport.slaves,
544 								      dev->caps.num_ports) - 1;
545 						if (max_vfs_pport < current_slaves)
546 							max_vfs_pport =
547 								current_slaves;
548 					}
549 					res_alloc->quota[t] =
550 						MLX4_MAX_MAC_NUM -
551 						2 * max_vfs_pport;
552 					res_alloc->guaranteed[t] = 2;
553 					for (j = 0; j < MLX4_MAX_PORTS; j++)
554 						res_alloc->res_port_free[j] =
555 							MLX4_MAX_MAC_NUM;
556 				} else {
557 					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
558 					res_alloc->guaranteed[t] = 2;
559 				}
560 				break;
561 			case RES_VLAN:
562 				if (t == mlx4_master_func_num(dev)) {
563 					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
564 					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
565 					for (j = 0; j < MLX4_MAX_PORTS; j++)
566 						res_alloc->res_port_free[j] =
567 							res_alloc->quota[t];
568 				} else {
569 					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
570 					res_alloc->guaranteed[t] = 0;
571 				}
572 				break;
573 			case RES_COUNTER:
574 				res_alloc->quota[t] = dev->caps.max_counters;
575 				res_alloc->guaranteed[t] = 0;
576 				if (t == mlx4_master_func_num(dev))
577 					res_alloc->res_free = res_alloc->quota[t];
578 				break;
579 			default:
580 				break;
581 			}
582 			if (i == RES_MAC || i == RES_VLAN) {
583 				for (j = 0; j < dev->caps.num_ports; j++)
584 					if (test_bit(j, actv_ports.ports))
585 						res_alloc->res_port_rsvd[j] +=
586 							res_alloc->guaranteed[t];
587 			} else {
588 				res_alloc->res_reserved += res_alloc->guaranteed[t];
589 			}
590 		}
591 	}
592 	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
593 	return 0;
594 
595 no_mem_err:
596 	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
597 		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
598 		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
599 		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
600 		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
601 		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
602 		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
603 	}
604 	return -ENOMEM;
605 }
606 
607 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
608 				enum mlx4_res_tracker_free_type type)
609 {
610 	struct mlx4_priv *priv = mlx4_priv(dev);
611 	int i;
612 
613 	if (priv->mfunc.master.res_tracker.slave_list) {
614 		if (type != RES_TR_FREE_STRUCTS_ONLY) {
615 			for (i = 0; i < dev->num_slaves; i++) {
616 				if (type == RES_TR_FREE_ALL ||
617 				    dev->caps.function != i)
618 					mlx4_delete_all_resources_for_slave(dev, i);
619 			}
620 			/* free master's vlans */
621 			i = dev->caps.function;
622 			mlx4_reset_roce_gids(dev, i);
623 			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
624 			rem_slave_vlans(dev, i);
625 			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
626 		}
627 
628 		if (type != RES_TR_FREE_SLAVES_ONLY) {
629 			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
630 				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
631 				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
632 				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
633 				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
634 				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
635 				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
636 			}
637 			kfree(priv->mfunc.master.res_tracker.slave_list);
638 			priv->mfunc.master.res_tracker.slave_list = NULL;
639 		}
640 	}
641 }
642 
643 static void update_pkey_index(struct mlx4_dev *dev, int slave,
644 			      struct mlx4_cmd_mailbox *inbox)
645 {
646 	u8 sched = *(u8 *)(inbox->buf + 64);
647 	u8 orig_index = *(u8 *)(inbox->buf + 35);
648 	u8 new_index;
649 	struct mlx4_priv *priv = mlx4_priv(dev);
650 	int port;
651 
652 	port = (sched >> 6 & 1) + 1;
653 
654 	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
655 	*(u8 *)(inbox->buf + 35) = new_index;
656 }
657 
658 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
659 		       u8 slave)
660 {
661 	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
662 	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
663 	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
664 	int port;
665 
666 	if (MLX4_QP_ST_UD == ts) {
667 		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
668 		if (mlx4_is_eth(dev, port))
669 			qp_ctx->pri_path.mgid_index =
670 				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
671 		else
672 			qp_ctx->pri_path.mgid_index = slave | 0x80;
673 
674 	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
675 		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
676 			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
677 			if (mlx4_is_eth(dev, port)) {
678 				qp_ctx->pri_path.mgid_index +=
679 					mlx4_get_base_gid_ix(dev, slave, port);
680 				qp_ctx->pri_path.mgid_index &= 0x7f;
681 			} else {
682 				qp_ctx->pri_path.mgid_index = slave & 0x7F;
683 			}
684 		}
685 		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
686 			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
687 			if (mlx4_is_eth(dev, port)) {
688 				qp_ctx->alt_path.mgid_index +=
689 					mlx4_get_base_gid_ix(dev, slave, port);
690 				qp_ctx->alt_path.mgid_index &= 0x7f;
691 			} else {
692 				qp_ctx->alt_path.mgid_index = slave & 0x7F;
693 			}
694 		}
695 	}
696 }
697 
698 static int update_vport_qp_param(struct mlx4_dev *dev,
699 				 struct mlx4_cmd_mailbox *inbox,
700 				 u8 slave, u32 qpn)
701 {
702 	struct mlx4_qp_context	*qpc = inbox->buf + 8;
703 	struct mlx4_vport_oper_state *vp_oper;
704 	struct mlx4_priv *priv;
705 	u32 qp_type;
706 	int port;
707 
708 	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
709 	priv = mlx4_priv(dev);
710 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
711 	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
712 
713 	if (MLX4_VGT != vp_oper->state.default_vlan) {
714 		/* the reserved QPs (special, proxy, tunnel)
715 		 * do not operate over vlans
716 		 */
717 		if (mlx4_is_qp_reserved(dev, qpn))
718 			return 0;
719 
720 		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
721 		if (qp_type == MLX4_QP_ST_UD ||
722 		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
723 			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
724 				*(__be32 *)inbox->buf =
725 					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
726 					MLX4_QP_OPTPAR_VLAN_STRIPPING);
727 				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
728 			} else {
729 				struct mlx4_update_qp_params params = {.flags = 0};
730 
731 				mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
732 			}
733 		}
734 
735 		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
736 		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
737 			qpc->pri_path.vlan_control =
738 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
739 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
740 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
741 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
742 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
743 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
744 		} else if (0 != vp_oper->state.default_vlan) {
745 			qpc->pri_path.vlan_control =
746 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
747 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
748 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
749 		} else { /* priority tagged */
750 			qpc->pri_path.vlan_control =
751 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
752 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
753 		}
754 
755 		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
756 		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
757 		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
758 		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
759 		qpc->pri_path.sched_queue &= 0xC7;
760 		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
761 	}
762 	if (vp_oper->state.spoofchk) {
763 		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
764 		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
765 	}
766 	return 0;
767 }
768 
769 static int mpt_mask(struct mlx4_dev *dev)
770 {
771 	return dev->caps.num_mpts - 1;
772 }
773 
774 static void *find_res(struct mlx4_dev *dev, u64 res_id,
775 		      enum mlx4_resource type)
776 {
777 	struct mlx4_priv *priv = mlx4_priv(dev);
778 
779 	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
780 				  res_id);
781 }
782 
783 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
784 		   enum mlx4_resource type,
785 		   void *res)
786 {
787 	struct res_common *r;
788 	int err = 0;
789 
790 	spin_lock_irq(mlx4_tlock(dev));
791 	r = find_res(dev, res_id, type);
792 	if (!r) {
793 		err = -ENONET;
794 		goto exit;
795 	}
796 
797 	if (r->state == RES_ANY_BUSY) {
798 		err = -EBUSY;
799 		goto exit;
800 	}
801 
802 	if (r->owner != slave) {
803 		err = -EPERM;
804 		goto exit;
805 	}
806 
807 	r->from_state = r->state;
808 	r->state = RES_ANY_BUSY;
809 
810 	if (res)
811 		*((struct res_common **)res) = r;
812 
813 exit:
814 	spin_unlock_irq(mlx4_tlock(dev));
815 	return err;
816 }
817 
818 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
819 				    enum mlx4_resource type,
820 				    u64 res_id, int *slave)
821 {
822 
823 	struct res_common *r;
824 	int err = -ENOENT;
825 	int id = res_id;
826 
827 	if (type == RES_QP)
828 		id &= 0x7fffff;
829 	spin_lock(mlx4_tlock(dev));
830 
831 	r = find_res(dev, id, type);
832 	if (r) {
833 		*slave = r->owner;
834 		err = 0;
835 	}
836 	spin_unlock(mlx4_tlock(dev));
837 
838 	return err;
839 }
840 
841 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
842 		    enum mlx4_resource type)
843 {
844 	struct res_common *r;
845 
846 	spin_lock_irq(mlx4_tlock(dev));
847 	r = find_res(dev, res_id, type);
848 	if (r)
849 		r->state = r->from_state;
850 	spin_unlock_irq(mlx4_tlock(dev));
851 }
852 
853 static struct res_common *alloc_qp_tr(int id)
854 {
855 	struct res_qp *ret;
856 
857 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
858 	if (!ret)
859 		return NULL;
860 
861 	ret->com.res_id = id;
862 	ret->com.state = RES_QP_RESERVED;
863 	ret->local_qpn = id;
864 	INIT_LIST_HEAD(&ret->mcg_list);
865 	spin_lock_init(&ret->mcg_spl);
866 	atomic_set(&ret->ref_count, 0);
867 
868 	return &ret->com;
869 }
870 
871 static struct res_common *alloc_mtt_tr(int id, int order)
872 {
873 	struct res_mtt *ret;
874 
875 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
876 	if (!ret)
877 		return NULL;
878 
879 	ret->com.res_id = id;
880 	ret->order = order;
881 	ret->com.state = RES_MTT_ALLOCATED;
882 	atomic_set(&ret->ref_count, 0);
883 
884 	return &ret->com;
885 }
886 
887 static struct res_common *alloc_mpt_tr(int id, int key)
888 {
889 	struct res_mpt *ret;
890 
891 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
892 	if (!ret)
893 		return NULL;
894 
895 	ret->com.res_id = id;
896 	ret->com.state = RES_MPT_RESERVED;
897 	ret->key = key;
898 
899 	return &ret->com;
900 }
901 
902 static struct res_common *alloc_eq_tr(int id)
903 {
904 	struct res_eq *ret;
905 
906 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
907 	if (!ret)
908 		return NULL;
909 
910 	ret->com.res_id = id;
911 	ret->com.state = RES_EQ_RESERVED;
912 
913 	return &ret->com;
914 }
915 
916 static struct res_common *alloc_cq_tr(int id)
917 {
918 	struct res_cq *ret;
919 
920 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
921 	if (!ret)
922 		return NULL;
923 
924 	ret->com.res_id = id;
925 	ret->com.state = RES_CQ_ALLOCATED;
926 	atomic_set(&ret->ref_count, 0);
927 
928 	return &ret->com;
929 }
930 
931 static struct res_common *alloc_srq_tr(int id)
932 {
933 	struct res_srq *ret;
934 
935 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
936 	if (!ret)
937 		return NULL;
938 
939 	ret->com.res_id = id;
940 	ret->com.state = RES_SRQ_ALLOCATED;
941 	atomic_set(&ret->ref_count, 0);
942 
943 	return &ret->com;
944 }
945 
946 static struct res_common *alloc_counter_tr(int id)
947 {
948 	struct res_counter *ret;
949 
950 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
951 	if (!ret)
952 		return NULL;
953 
954 	ret->com.res_id = id;
955 	ret->com.state = RES_COUNTER_ALLOCATED;
956 
957 	return &ret->com;
958 }
959 
960 static struct res_common *alloc_xrcdn_tr(int id)
961 {
962 	struct res_xrcdn *ret;
963 
964 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
965 	if (!ret)
966 		return NULL;
967 
968 	ret->com.res_id = id;
969 	ret->com.state = RES_XRCD_ALLOCATED;
970 
971 	return &ret->com;
972 }
973 
974 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
975 {
976 	struct res_fs_rule *ret;
977 
978 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
979 	if (!ret)
980 		return NULL;
981 
982 	ret->com.res_id = id;
983 	ret->com.state = RES_FS_RULE_ALLOCATED;
984 	ret->qpn = qpn;
985 	return &ret->com;
986 }
987 
988 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
989 				   int extra)
990 {
991 	struct res_common *ret;
992 
993 	switch (type) {
994 	case RES_QP:
995 		ret = alloc_qp_tr(id);
996 		break;
997 	case RES_MPT:
998 		ret = alloc_mpt_tr(id, extra);
999 		break;
1000 	case RES_MTT:
1001 		ret = alloc_mtt_tr(id, extra);
1002 		break;
1003 	case RES_EQ:
1004 		ret = alloc_eq_tr(id);
1005 		break;
1006 	case RES_CQ:
1007 		ret = alloc_cq_tr(id);
1008 		break;
1009 	case RES_SRQ:
1010 		ret = alloc_srq_tr(id);
1011 		break;
1012 	case RES_MAC:
1013 		pr_err("implementation missing\n");
1014 		return NULL;
1015 	case RES_COUNTER:
1016 		ret = alloc_counter_tr(id);
1017 		break;
1018 	case RES_XRCD:
1019 		ret = alloc_xrcdn_tr(id);
1020 		break;
1021 	case RES_FS_RULE:
1022 		ret = alloc_fs_rule_tr(id, extra);
1023 		break;
1024 	default:
1025 		return NULL;
1026 	}
1027 	if (ret)
1028 		ret->owner = slave;
1029 
1030 	return ret;
1031 }
1032 
1033 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1034 			 enum mlx4_resource type, int extra)
1035 {
1036 	int i;
1037 	int err;
1038 	struct mlx4_priv *priv = mlx4_priv(dev);
1039 	struct res_common **res_arr;
1040 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1041 	struct rb_root *root = &tracker->res_tree[type];
1042 
1043 	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1044 	if (!res_arr)
1045 		return -ENOMEM;
1046 
1047 	for (i = 0; i < count; ++i) {
1048 		res_arr[i] = alloc_tr(base + i, type, slave, extra);
1049 		if (!res_arr[i]) {
1050 			for (--i; i >= 0; --i)
1051 				kfree(res_arr[i]);
1052 
1053 			kfree(res_arr);
1054 			return -ENOMEM;
1055 		}
1056 	}
1057 
1058 	spin_lock_irq(mlx4_tlock(dev));
1059 	for (i = 0; i < count; ++i) {
1060 		if (find_res(dev, base + i, type)) {
1061 			err = -EEXIST;
1062 			goto undo;
1063 		}
1064 		err = res_tracker_insert(root, res_arr[i]);
1065 		if (err)
1066 			goto undo;
1067 		list_add_tail(&res_arr[i]->list,
1068 			      &tracker->slave_list[slave].res_list[type]);
1069 	}
1070 	spin_unlock_irq(mlx4_tlock(dev));
1071 	kfree(res_arr);
1072 
1073 	return 0;
1074 
1075 undo:
1076 	for (--i; i >= 0; --i)
1077 		rb_erase(&res_arr[i]->node, root);
1078 
1079 	spin_unlock_irq(mlx4_tlock(dev));
1080 
1081 	for (i = 0; i < count; ++i)
1082 		kfree(res_arr[i]);
1083 
1084 	kfree(res_arr);
1085 
1086 	return err;
1087 }
1088 
1089 static int remove_qp_ok(struct res_qp *res)
1090 {
1091 	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1092 	    !list_empty(&res->mcg_list)) {
1093 		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1094 		       res->com.state, atomic_read(&res->ref_count));
1095 		return -EBUSY;
1096 	} else if (res->com.state != RES_QP_RESERVED) {
1097 		return -EPERM;
1098 	}
1099 
1100 	return 0;
1101 }
1102 
1103 static int remove_mtt_ok(struct res_mtt *res, int order)
1104 {
1105 	if (res->com.state == RES_MTT_BUSY ||
1106 	    atomic_read(&res->ref_count)) {
1107 		pr_devel("%s-%d: state %s, ref_count %d\n",
1108 			 __func__, __LINE__,
1109 			 mtt_states_str(res->com.state),
1110 			 atomic_read(&res->ref_count));
1111 		return -EBUSY;
1112 	} else if (res->com.state != RES_MTT_ALLOCATED)
1113 		return -EPERM;
1114 	else if (res->order != order)
1115 		return -EINVAL;
1116 
1117 	return 0;
1118 }
1119 
1120 static int remove_mpt_ok(struct res_mpt *res)
1121 {
1122 	if (res->com.state == RES_MPT_BUSY)
1123 		return -EBUSY;
1124 	else if (res->com.state != RES_MPT_RESERVED)
1125 		return -EPERM;
1126 
1127 	return 0;
1128 }
1129 
1130 static int remove_eq_ok(struct res_eq *res)
1131 {
1132 	if (res->com.state == RES_EQ_BUSY)
1133 		return -EBUSY;
1134 	else if (res->com.state != RES_EQ_RESERVED)
1135 		return -EPERM;
1136 
1137 	return 0;
1138 }
1139 
1140 static int remove_counter_ok(struct res_counter *res)
1141 {
1142 	if (res->com.state == RES_COUNTER_BUSY)
1143 		return -EBUSY;
1144 	else if (res->com.state != RES_COUNTER_ALLOCATED)
1145 		return -EPERM;
1146 
1147 	return 0;
1148 }
1149 
1150 static int remove_xrcdn_ok(struct res_xrcdn *res)
1151 {
1152 	if (res->com.state == RES_XRCD_BUSY)
1153 		return -EBUSY;
1154 	else if (res->com.state != RES_XRCD_ALLOCATED)
1155 		return -EPERM;
1156 
1157 	return 0;
1158 }
1159 
1160 static int remove_fs_rule_ok(struct res_fs_rule *res)
1161 {
1162 	if (res->com.state == RES_FS_RULE_BUSY)
1163 		return -EBUSY;
1164 	else if (res->com.state != RES_FS_RULE_ALLOCATED)
1165 		return -EPERM;
1166 
1167 	return 0;
1168 }
1169 
1170 static int remove_cq_ok(struct res_cq *res)
1171 {
1172 	if (res->com.state == RES_CQ_BUSY)
1173 		return -EBUSY;
1174 	else if (res->com.state != RES_CQ_ALLOCATED)
1175 		return -EPERM;
1176 
1177 	return 0;
1178 }
1179 
1180 static int remove_srq_ok(struct res_srq *res)
1181 {
1182 	if (res->com.state == RES_SRQ_BUSY)
1183 		return -EBUSY;
1184 	else if (res->com.state != RES_SRQ_ALLOCATED)
1185 		return -EPERM;
1186 
1187 	return 0;
1188 }
1189 
1190 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1191 {
1192 	switch (type) {
1193 	case RES_QP:
1194 		return remove_qp_ok((struct res_qp *)res);
1195 	case RES_CQ:
1196 		return remove_cq_ok((struct res_cq *)res);
1197 	case RES_SRQ:
1198 		return remove_srq_ok((struct res_srq *)res);
1199 	case RES_MPT:
1200 		return remove_mpt_ok((struct res_mpt *)res);
1201 	case RES_MTT:
1202 		return remove_mtt_ok((struct res_mtt *)res, extra);
1203 	case RES_MAC:
1204 		return -ENOSYS;
1205 	case RES_EQ:
1206 		return remove_eq_ok((struct res_eq *)res);
1207 	case RES_COUNTER:
1208 		return remove_counter_ok((struct res_counter *)res);
1209 	case RES_XRCD:
1210 		return remove_xrcdn_ok((struct res_xrcdn *)res);
1211 	case RES_FS_RULE:
1212 		return remove_fs_rule_ok((struct res_fs_rule *)res);
1213 	default:
1214 		return -EINVAL;
1215 	}
1216 }
1217 
1218 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1219 			 enum mlx4_resource type, int extra)
1220 {
1221 	u64 i;
1222 	int err;
1223 	struct mlx4_priv *priv = mlx4_priv(dev);
1224 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1225 	struct res_common *r;
1226 
1227 	spin_lock_irq(mlx4_tlock(dev));
1228 	for (i = base; i < base + count; ++i) {
1229 		r = res_tracker_lookup(&tracker->res_tree[type], i);
1230 		if (!r) {
1231 			err = -ENOENT;
1232 			goto out;
1233 		}
1234 		if (r->owner != slave) {
1235 			err = -EPERM;
1236 			goto out;
1237 		}
1238 		err = remove_ok(r, type, extra);
1239 		if (err)
1240 			goto out;
1241 	}
1242 
1243 	for (i = base; i < base + count; ++i) {
1244 		r = res_tracker_lookup(&tracker->res_tree[type], i);
1245 		rb_erase(&r->node, &tracker->res_tree[type]);
1246 		list_del(&r->list);
1247 		kfree(r);
1248 	}
1249 	err = 0;
1250 
1251 out:
1252 	spin_unlock_irq(mlx4_tlock(dev));
1253 
1254 	return err;
1255 }
1256 
1257 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1258 				enum res_qp_states state, struct res_qp **qp,
1259 				int alloc)
1260 {
1261 	struct mlx4_priv *priv = mlx4_priv(dev);
1262 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1263 	struct res_qp *r;
1264 	int err = 0;
1265 
1266 	spin_lock_irq(mlx4_tlock(dev));
1267 	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1268 	if (!r)
1269 		err = -ENOENT;
1270 	else if (r->com.owner != slave)
1271 		err = -EPERM;
1272 	else {
1273 		switch (state) {
1274 		case RES_QP_BUSY:
1275 			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1276 				 __func__, r->com.res_id);
1277 			err = -EBUSY;
1278 			break;
1279 
1280 		case RES_QP_RESERVED:
1281 			if (r->com.state == RES_QP_MAPPED && !alloc)
1282 				break;
1283 
1284 			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1285 			err = -EINVAL;
1286 			break;
1287 
1288 		case RES_QP_MAPPED:
1289 			if ((r->com.state == RES_QP_RESERVED && alloc) ||
1290 			    r->com.state == RES_QP_HW)
1291 				break;
1292 			else {
1293 				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1294 					  r->com.res_id);
1295 				err = -EINVAL;
1296 			}
1297 
1298 			break;
1299 
1300 		case RES_QP_HW:
1301 			if (r->com.state != RES_QP_MAPPED)
1302 				err = -EINVAL;
1303 			break;
1304 		default:
1305 			err = -EINVAL;
1306 		}
1307 
1308 		if (!err) {
1309 			r->com.from_state = r->com.state;
1310 			r->com.to_state = state;
1311 			r->com.state = RES_QP_BUSY;
1312 			if (qp)
1313 				*qp = r;
1314 		}
1315 	}
1316 
1317 	spin_unlock_irq(mlx4_tlock(dev));
1318 
1319 	return err;
1320 }
1321 
1322 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1323 				enum res_mpt_states state, struct res_mpt **mpt)
1324 {
1325 	struct mlx4_priv *priv = mlx4_priv(dev);
1326 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1327 	struct res_mpt *r;
1328 	int err = 0;
1329 
1330 	spin_lock_irq(mlx4_tlock(dev));
1331 	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1332 	if (!r)
1333 		err = -ENOENT;
1334 	else if (r->com.owner != slave)
1335 		err = -EPERM;
1336 	else {
1337 		switch (state) {
1338 		case RES_MPT_BUSY:
1339 			err = -EINVAL;
1340 			break;
1341 
1342 		case RES_MPT_RESERVED:
1343 			if (r->com.state != RES_MPT_MAPPED)
1344 				err = -EINVAL;
1345 			break;
1346 
1347 		case RES_MPT_MAPPED:
1348 			if (r->com.state != RES_MPT_RESERVED &&
1349 			    r->com.state != RES_MPT_HW)
1350 				err = -EINVAL;
1351 			break;
1352 
1353 		case RES_MPT_HW:
1354 			if (r->com.state != RES_MPT_MAPPED)
1355 				err = -EINVAL;
1356 			break;
1357 		default:
1358 			err = -EINVAL;
1359 		}
1360 
1361 		if (!err) {
1362 			r->com.from_state = r->com.state;
1363 			r->com.to_state = state;
1364 			r->com.state = RES_MPT_BUSY;
1365 			if (mpt)
1366 				*mpt = r;
1367 		}
1368 	}
1369 
1370 	spin_unlock_irq(mlx4_tlock(dev));
1371 
1372 	return err;
1373 }
1374 
1375 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1376 				enum res_eq_states state, struct res_eq **eq)
1377 {
1378 	struct mlx4_priv *priv = mlx4_priv(dev);
1379 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1380 	struct res_eq *r;
1381 	int err = 0;
1382 
1383 	spin_lock_irq(mlx4_tlock(dev));
1384 	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1385 	if (!r)
1386 		err = -ENOENT;
1387 	else if (r->com.owner != slave)
1388 		err = -EPERM;
1389 	else {
1390 		switch (state) {
1391 		case RES_EQ_BUSY:
1392 			err = -EINVAL;
1393 			break;
1394 
1395 		case RES_EQ_RESERVED:
1396 			if (r->com.state != RES_EQ_HW)
1397 				err = -EINVAL;
1398 			break;
1399 
1400 		case RES_EQ_HW:
1401 			if (r->com.state != RES_EQ_RESERVED)
1402 				err = -EINVAL;
1403 			break;
1404 
1405 		default:
1406 			err = -EINVAL;
1407 		}
1408 
1409 		if (!err) {
1410 			r->com.from_state = r->com.state;
1411 			r->com.to_state = state;
1412 			r->com.state = RES_EQ_BUSY;
1413 			if (eq)
1414 				*eq = r;
1415 		}
1416 	}
1417 
1418 	spin_unlock_irq(mlx4_tlock(dev));
1419 
1420 	return err;
1421 }
1422 
1423 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1424 				enum res_cq_states state, struct res_cq **cq)
1425 {
1426 	struct mlx4_priv *priv = mlx4_priv(dev);
1427 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1428 	struct res_cq *r;
1429 	int err;
1430 
1431 	spin_lock_irq(mlx4_tlock(dev));
1432 	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1433 	if (!r) {
1434 		err = -ENOENT;
1435 	} else if (r->com.owner != slave) {
1436 		err = -EPERM;
1437 	} else if (state == RES_CQ_ALLOCATED) {
1438 		if (r->com.state != RES_CQ_HW)
1439 			err = -EINVAL;
1440 		else if (atomic_read(&r->ref_count))
1441 			err = -EBUSY;
1442 		else
1443 			err = 0;
1444 	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1445 		err = -EINVAL;
1446 	} else {
1447 		err = 0;
1448 	}
1449 
1450 	if (!err) {
1451 		r->com.from_state = r->com.state;
1452 		r->com.to_state = state;
1453 		r->com.state = RES_CQ_BUSY;
1454 		if (cq)
1455 			*cq = r;
1456 	}
1457 
1458 	spin_unlock_irq(mlx4_tlock(dev));
1459 
1460 	return err;
1461 }
1462 
1463 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1464 				 enum res_srq_states state, struct res_srq **srq)
1465 {
1466 	struct mlx4_priv *priv = mlx4_priv(dev);
1467 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1468 	struct res_srq *r;
1469 	int err = 0;
1470 
1471 	spin_lock_irq(mlx4_tlock(dev));
1472 	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1473 	if (!r) {
1474 		err = -ENOENT;
1475 	} else if (r->com.owner != slave) {
1476 		err = -EPERM;
1477 	} else if (state == RES_SRQ_ALLOCATED) {
1478 		if (r->com.state != RES_SRQ_HW)
1479 			err = -EINVAL;
1480 		else if (atomic_read(&r->ref_count))
1481 			err = -EBUSY;
1482 	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1483 		err = -EINVAL;
1484 	}
1485 
1486 	if (!err) {
1487 		r->com.from_state = r->com.state;
1488 		r->com.to_state = state;
1489 		r->com.state = RES_SRQ_BUSY;
1490 		if (srq)
1491 			*srq = r;
1492 	}
1493 
1494 	spin_unlock_irq(mlx4_tlock(dev));
1495 
1496 	return err;
1497 }
1498 
1499 static void res_abort_move(struct mlx4_dev *dev, int slave,
1500 			   enum mlx4_resource type, int id)
1501 {
1502 	struct mlx4_priv *priv = mlx4_priv(dev);
1503 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1504 	struct res_common *r;
1505 
1506 	spin_lock_irq(mlx4_tlock(dev));
1507 	r = res_tracker_lookup(&tracker->res_tree[type], id);
1508 	if (r && (r->owner == slave))
1509 		r->state = r->from_state;
1510 	spin_unlock_irq(mlx4_tlock(dev));
1511 }
1512 
1513 static void res_end_move(struct mlx4_dev *dev, int slave,
1514 			 enum mlx4_resource type, int id)
1515 {
1516 	struct mlx4_priv *priv = mlx4_priv(dev);
1517 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1518 	struct res_common *r;
1519 
1520 	spin_lock_irq(mlx4_tlock(dev));
1521 	r = res_tracker_lookup(&tracker->res_tree[type], id);
1522 	if (r && (r->owner == slave))
1523 		r->state = r->to_state;
1524 	spin_unlock_irq(mlx4_tlock(dev));
1525 }
1526 
1527 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1528 {
1529 	return mlx4_is_qp_reserved(dev, qpn) &&
1530 		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1531 }
1532 
1533 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1534 {
1535 	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1536 }
1537 
1538 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1539 			u64 in_param, u64 *out_param)
1540 {
1541 	int err;
1542 	int count;
1543 	int align;
1544 	int base;
1545 	int qpn;
1546 
1547 	switch (op) {
1548 	case RES_OP_RESERVE:
1549 		count = get_param_l(&in_param) & 0xffffff;
1550 		align = get_param_h(&in_param);
1551 		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1552 		if (err)
1553 			return err;
1554 
1555 		err = __mlx4_qp_reserve_range(dev, count, align, &base);
1556 		if (err) {
1557 			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1558 			return err;
1559 		}
1560 
1561 		err = add_res_range(dev, slave, base, count, RES_QP, 0);
1562 		if (err) {
1563 			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1564 			__mlx4_qp_release_range(dev, base, count);
1565 			return err;
1566 		}
1567 		set_param_l(out_param, base);
1568 		break;
1569 	case RES_OP_MAP_ICM:
1570 		qpn = get_param_l(&in_param) & 0x7fffff;
1571 		if (valid_reserved(dev, slave, qpn)) {
1572 			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1573 			if (err)
1574 				return err;
1575 		}
1576 
1577 		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1578 					   NULL, 1);
1579 		if (err)
1580 			return err;
1581 
1582 		if (!fw_reserved(dev, qpn)) {
1583 			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1584 			if (err) {
1585 				res_abort_move(dev, slave, RES_QP, qpn);
1586 				return err;
1587 			}
1588 		}
1589 
1590 		res_end_move(dev, slave, RES_QP, qpn);
1591 		break;
1592 
1593 	default:
1594 		err = -EINVAL;
1595 		break;
1596 	}
1597 	return err;
1598 }
1599 
1600 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1601 			 u64 in_param, u64 *out_param)
1602 {
1603 	int err = -EINVAL;
1604 	int base;
1605 	int order;
1606 
1607 	if (op != RES_OP_RESERVE_AND_MAP)
1608 		return err;
1609 
1610 	order = get_param_l(&in_param);
1611 
1612 	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1613 	if (err)
1614 		return err;
1615 
1616 	base = __mlx4_alloc_mtt_range(dev, order);
1617 	if (base == -1) {
1618 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1619 		return -ENOMEM;
1620 	}
1621 
1622 	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1623 	if (err) {
1624 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1625 		__mlx4_free_mtt_range(dev, base, order);
1626 	} else {
1627 		set_param_l(out_param, base);
1628 	}
1629 
1630 	return err;
1631 }
1632 
1633 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1634 			 u64 in_param, u64 *out_param)
1635 {
1636 	int err = -EINVAL;
1637 	int index;
1638 	int id;
1639 	struct res_mpt *mpt;
1640 
1641 	switch (op) {
1642 	case RES_OP_RESERVE:
1643 		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1644 		if (err)
1645 			break;
1646 
1647 		index = __mlx4_mpt_reserve(dev);
1648 		if (index == -1) {
1649 			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1650 			break;
1651 		}
1652 		id = index & mpt_mask(dev);
1653 
1654 		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1655 		if (err) {
1656 			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1657 			__mlx4_mpt_release(dev, index);
1658 			break;
1659 		}
1660 		set_param_l(out_param, index);
1661 		break;
1662 	case RES_OP_MAP_ICM:
1663 		index = get_param_l(&in_param);
1664 		id = index & mpt_mask(dev);
1665 		err = mr_res_start_move_to(dev, slave, id,
1666 					   RES_MPT_MAPPED, &mpt);
1667 		if (err)
1668 			return err;
1669 
1670 		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1671 		if (err) {
1672 			res_abort_move(dev, slave, RES_MPT, id);
1673 			return err;
1674 		}
1675 
1676 		res_end_move(dev, slave, RES_MPT, id);
1677 		break;
1678 	}
1679 	return err;
1680 }
1681 
1682 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1683 			u64 in_param, u64 *out_param)
1684 {
1685 	int cqn;
1686 	int err;
1687 
1688 	switch (op) {
1689 	case RES_OP_RESERVE_AND_MAP:
1690 		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1691 		if (err)
1692 			break;
1693 
1694 		err = __mlx4_cq_alloc_icm(dev, &cqn);
1695 		if (err) {
1696 			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1697 			break;
1698 		}
1699 
1700 		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1701 		if (err) {
1702 			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1703 			__mlx4_cq_free_icm(dev, cqn);
1704 			break;
1705 		}
1706 
1707 		set_param_l(out_param, cqn);
1708 		break;
1709 
1710 	default:
1711 		err = -EINVAL;
1712 	}
1713 
1714 	return err;
1715 }
1716 
1717 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1718 			 u64 in_param, u64 *out_param)
1719 {
1720 	int srqn;
1721 	int err;
1722 
1723 	switch (op) {
1724 	case RES_OP_RESERVE_AND_MAP:
1725 		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1726 		if (err)
1727 			break;
1728 
1729 		err = __mlx4_srq_alloc_icm(dev, &srqn);
1730 		if (err) {
1731 			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1732 			break;
1733 		}
1734 
1735 		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1736 		if (err) {
1737 			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1738 			__mlx4_srq_free_icm(dev, srqn);
1739 			break;
1740 		}
1741 
1742 		set_param_l(out_param, srqn);
1743 		break;
1744 
1745 	default:
1746 		err = -EINVAL;
1747 	}
1748 
1749 	return err;
1750 }
1751 
1752 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1753 				     u8 smac_index, u64 *mac)
1754 {
1755 	struct mlx4_priv *priv = mlx4_priv(dev);
1756 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1757 	struct list_head *mac_list =
1758 		&tracker->slave_list[slave].res_list[RES_MAC];
1759 	struct mac_res *res, *tmp;
1760 
1761 	list_for_each_entry_safe(res, tmp, mac_list, list) {
1762 		if (res->smac_index == smac_index && res->port == (u8) port) {
1763 			*mac = res->mac;
1764 			return 0;
1765 		}
1766 	}
1767 	return -ENOENT;
1768 }
1769 
1770 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1771 {
1772 	struct mlx4_priv *priv = mlx4_priv(dev);
1773 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1774 	struct list_head *mac_list =
1775 		&tracker->slave_list[slave].res_list[RES_MAC];
1776 	struct mac_res *res, *tmp;
1777 
1778 	list_for_each_entry_safe(res, tmp, mac_list, list) {
1779 		if (res->mac == mac && res->port == (u8) port) {
1780 			/* mac found. update ref count */
1781 			++res->ref_count;
1782 			return 0;
1783 		}
1784 	}
1785 
1786 	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1787 		return -EINVAL;
1788 	res = kzalloc(sizeof *res, GFP_KERNEL);
1789 	if (!res) {
1790 		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1791 		return -ENOMEM;
1792 	}
1793 	res->mac = mac;
1794 	res->port = (u8) port;
1795 	res->smac_index = smac_index;
1796 	res->ref_count = 1;
1797 	list_add_tail(&res->list,
1798 		      &tracker->slave_list[slave].res_list[RES_MAC]);
1799 	return 0;
1800 }
1801 
1802 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1803 			       int port)
1804 {
1805 	struct mlx4_priv *priv = mlx4_priv(dev);
1806 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1807 	struct list_head *mac_list =
1808 		&tracker->slave_list[slave].res_list[RES_MAC];
1809 	struct mac_res *res, *tmp;
1810 
1811 	list_for_each_entry_safe(res, tmp, mac_list, list) {
1812 		if (res->mac == mac && res->port == (u8) port) {
1813 			if (!--res->ref_count) {
1814 				list_del(&res->list);
1815 				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1816 				kfree(res);
1817 			}
1818 			break;
1819 		}
1820 	}
1821 }
1822 
1823 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1824 {
1825 	struct mlx4_priv *priv = mlx4_priv(dev);
1826 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1827 	struct list_head *mac_list =
1828 		&tracker->slave_list[slave].res_list[RES_MAC];
1829 	struct mac_res *res, *tmp;
1830 	int i;
1831 
1832 	list_for_each_entry_safe(res, tmp, mac_list, list) {
1833 		list_del(&res->list);
1834 		/* dereference the mac the num times the slave referenced it */
1835 		for (i = 0; i < res->ref_count; i++)
1836 			__mlx4_unregister_mac(dev, res->port, res->mac);
1837 		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1838 		kfree(res);
1839 	}
1840 }
1841 
1842 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1843 			 u64 in_param, u64 *out_param, int in_port)
1844 {
1845 	int err = -EINVAL;
1846 	int port;
1847 	u64 mac;
1848 	u8 smac_index;
1849 
1850 	if (op != RES_OP_RESERVE_AND_MAP)
1851 		return err;
1852 
1853 	port = !in_port ? get_param_l(out_param) : in_port;
1854 	port = mlx4_slave_convert_port(
1855 			dev, slave, port);
1856 
1857 	if (port < 0)
1858 		return -EINVAL;
1859 	mac = in_param;
1860 
1861 	err = __mlx4_register_mac(dev, port, mac);
1862 	if (err >= 0) {
1863 		smac_index = err;
1864 		set_param_l(out_param, err);
1865 		err = 0;
1866 	}
1867 
1868 	if (!err) {
1869 		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1870 		if (err)
1871 			__mlx4_unregister_mac(dev, port, mac);
1872 	}
1873 	return err;
1874 }
1875 
1876 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1877 			     int port, int vlan_index)
1878 {
1879 	struct mlx4_priv *priv = mlx4_priv(dev);
1880 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1881 	struct list_head *vlan_list =
1882 		&tracker->slave_list[slave].res_list[RES_VLAN];
1883 	struct vlan_res *res, *tmp;
1884 
1885 	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1886 		if (res->vlan == vlan && res->port == (u8) port) {
1887 			/* vlan found. update ref count */
1888 			++res->ref_count;
1889 			return 0;
1890 		}
1891 	}
1892 
1893 	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1894 		return -EINVAL;
1895 	res = kzalloc(sizeof(*res), GFP_KERNEL);
1896 	if (!res) {
1897 		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1898 		return -ENOMEM;
1899 	}
1900 	res->vlan = vlan;
1901 	res->port = (u8) port;
1902 	res->vlan_index = vlan_index;
1903 	res->ref_count = 1;
1904 	list_add_tail(&res->list,
1905 		      &tracker->slave_list[slave].res_list[RES_VLAN]);
1906 	return 0;
1907 }
1908 
1909 
1910 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1911 				int port)
1912 {
1913 	struct mlx4_priv *priv = mlx4_priv(dev);
1914 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1915 	struct list_head *vlan_list =
1916 		&tracker->slave_list[slave].res_list[RES_VLAN];
1917 	struct vlan_res *res, *tmp;
1918 
1919 	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1920 		if (res->vlan == vlan && res->port == (u8) port) {
1921 			if (!--res->ref_count) {
1922 				list_del(&res->list);
1923 				mlx4_release_resource(dev, slave, RES_VLAN,
1924 						      1, port);
1925 				kfree(res);
1926 			}
1927 			break;
1928 		}
1929 	}
1930 }
1931 
1932 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1933 {
1934 	struct mlx4_priv *priv = mlx4_priv(dev);
1935 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1936 	struct list_head *vlan_list =
1937 		&tracker->slave_list[slave].res_list[RES_VLAN];
1938 	struct vlan_res *res, *tmp;
1939 	int i;
1940 
1941 	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1942 		list_del(&res->list);
1943 		/* unregister the VLAN as many times as the slave referenced it */
1944 		for (i = 0; i < res->ref_count; i++)
1945 			__mlx4_unregister_vlan(dev, res->port, res->vlan);
1946 		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1947 		kfree(res);
1948 	}
1949 }
1950 
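/* ALLOC_RES handler for RES_VLAN.  Requests arriving without an explicit port
 * (the old VLAN API) are kept as a NOP and only flagged via old_vlan_api;
 * otherwise the VLAN is registered and tracked for the slave.
 */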
1951 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1952 			  u64 in_param, u64 *out_param, int in_port)
1953 {
1954 	struct mlx4_priv *priv = mlx4_priv(dev);
1955 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1956 	int err;
1957 	u16 vlan;
1958 	int vlan_index;
1959 	int port;
1960 
1961 	port = !in_port ? get_param_l(out_param) : in_port;
1962 
1963 	if (!port || op != RES_OP_RESERVE_AND_MAP)
1964 		return -EINVAL;
1965 
1966 	port = mlx4_slave_convert_port(
1967 			dev, slave, port);
1968 
1969 	if (port < 0)
1970 		return -EINVAL;
1971 	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1972 	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1973 		slave_state[slave].old_vlan_api = true;
1974 		return 0;
1975 	}
1976 
1977 	vlan = (u16) in_param;
1978 
1979 	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1980 	if (!err) {
1981 		set_param_l(out_param, (u32) vlan_index);
1982 		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1983 		if (err)
1984 			__mlx4_unregister_vlan(dev, port, vlan);
1985 	}
1986 	return err;
1987 }
1988 
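/* ALLOC_RES handler for RES_COUNTER: checks the slave's quota, allocates a
 * counter index and adds it to the tracker, rolling back on any failure.
 */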
1989 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1990 			     u64 in_param, u64 *out_param)
1991 {
1992 	u32 index;
1993 	int err;
1994 
1995 	if (op != RES_OP_RESERVE)
1996 		return -EINVAL;
1997 
1998 	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
1999 	if (err)
2000 		return err;
2001 
2002 	err = __mlx4_counter_alloc(dev, &index);
2003 	if (err) {
2004 		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2005 		return err;
2006 	}
2007 
2008 	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2009 	if (err) {
2010 		__mlx4_counter_free(dev, index);
2011 		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2012 	} else {
2013 		set_param_l(out_param, index);
2014 	}
2015 
2016 	return err;
2017 }
2018 
2019 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2020 			   u64 in_param, u64 *out_param)
2021 {
2022 	u32 xrcdn;
2023 	int err;
2024 
2025 	if (op != RES_OP_RESERVE)
2026 		return -EINVAL;
2027 
2028 	err = __mlx4_xrcd_alloc(dev, &xrcdn);
2029 	if (err)
2030 		return err;
2031 
2032 	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2033 	if (err)
2034 		__mlx4_xrcd_free(dev, xrcdn);
2035 	else
2036 		set_param_l(out_param, xrcdn);
2037 
2038 	return err;
2039 }
2040 
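/* Dispatcher for the virtualized ALLOC_RES command.  The resource type is
 * taken from the low byte of in_modifier; for MAC/VLAN the port is carried in
 * bits 8-15.  op_modifier selects the allocation phase (reserve and/or map).
 */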
2041 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2042 			   struct mlx4_vhcr *vhcr,
2043 			   struct mlx4_cmd_mailbox *inbox,
2044 			   struct mlx4_cmd_mailbox *outbox,
2045 			   struct mlx4_cmd_info *cmd)
2046 {
2047 	int err;
2048 	int alop = vhcr->op_modifier;
2049 
2050 	switch (vhcr->in_modifier & 0xFF) {
2051 	case RES_QP:
2052 		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2053 				   vhcr->in_param, &vhcr->out_param);
2054 		break;
2055 
2056 	case RES_MTT:
2057 		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2058 				    vhcr->in_param, &vhcr->out_param);
2059 		break;
2060 
2061 	case RES_MPT:
2062 		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2063 				    vhcr->in_param, &vhcr->out_param);
2064 		break;
2065 
2066 	case RES_CQ:
2067 		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2068 				   vhcr->in_param, &vhcr->out_param);
2069 		break;
2070 
2071 	case RES_SRQ:
2072 		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2073 				    vhcr->in_param, &vhcr->out_param);
2074 		break;
2075 
2076 	case RES_MAC:
2077 		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2078 				    vhcr->in_param, &vhcr->out_param,
2079 				    (vhcr->in_modifier >> 8) & 0xFF);
2080 		break;
2081 
2082 	case RES_VLAN:
2083 		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2084 				     vhcr->in_param, &vhcr->out_param,
2085 				     (vhcr->in_modifier >> 8) & 0xFF);
2086 		break;
2087 
2088 	case RES_COUNTER:
2089 		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2090 					vhcr->in_param, &vhcr->out_param);
2091 		break;
2092 
2093 	case RES_XRCD:
2094 		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2095 				      vhcr->in_param, &vhcr->out_param);
2096 		break;
2097 
2098 	default:
2099 		err = -EINVAL;
2100 		break;
2101 	}
2102 
2103 	return err;
2104 }
2105 
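/* FREE_RES handler for RES_QP: RES_OP_RESERVE releases a reserved QP number
 * range, RES_OP_MAP_ICM unmaps the QP's ICM memory and moves the resource
 * back to the RESERVED state.
 */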
2106 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2107 		       u64 in_param)
2108 {
2109 	int err;
2110 	int count;
2111 	int base;
2112 	int qpn;
2113 
2114 	switch (op) {
2115 	case RES_OP_RESERVE:
2116 		base = get_param_l(&in_param) & 0x7fffff;
2117 		count = get_param_h(&in_param);
2118 		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2119 		if (err)
2120 			break;
2121 		mlx4_release_resource(dev, slave, RES_QP, count, 0);
2122 		__mlx4_qp_release_range(dev, base, count);
2123 		break;
2124 	case RES_OP_MAP_ICM:
2125 		qpn = get_param_l(&in_param) & 0x7fffff;
2126 		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2127 					   NULL, 0);
2128 		if (err)
2129 			return err;
2130 
2131 		if (!fw_reserved(dev, qpn))
2132 			__mlx4_qp_free_icm(dev, qpn);
2133 
2134 		res_end_move(dev, slave, RES_QP, qpn);
2135 
2136 		if (valid_reserved(dev, slave, qpn))
2137 			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2138 		break;
2139 	default:
2140 		err = -EINVAL;
2141 		break;
2142 	}
2143 	return err;
2144 }
2145 
2146 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2147 			u64 in_param, u64 *out_param)
2148 {
2149 	int err = -EINVAL;
2150 	int base;
2151 	int order;
2152 
2153 	if (op != RES_OP_RESERVE_AND_MAP)
2154 		return err;
2155 
2156 	base = get_param_l(&in_param);
2157 	order = get_param_h(&in_param);
2158 	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2159 	if (!err) {
2160 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2161 		__mlx4_free_mtt_range(dev, base, order);
2162 	}
2163 	return err;
2164 }
2165 
2166 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2167 			u64 in_param)
2168 {
2169 	int err = -EINVAL;
2170 	int index;
2171 	int id;
2172 	struct res_mpt *mpt;
2173 
2174 	switch (op) {
2175 	case RES_OP_RESERVE:
2176 		index = get_param_l(&in_param);
2177 		id = index & mpt_mask(dev);
2178 		err = get_res(dev, slave, id, RES_MPT, &mpt);
2179 		if (err)
2180 			break;
2181 		index = mpt->key;
2182 		put_res(dev, slave, id, RES_MPT);
2183 
2184 		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2185 		if (err)
2186 			break;
2187 		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2188 		__mlx4_mpt_release(dev, index);
2189 		break;
2190 	case RES_OP_MAP_ICM:
2191 			index = get_param_l(&in_param);
2192 			id = index & mpt_mask(dev);
2193 			err = mr_res_start_move_to(dev, slave, id,
2194 						   RES_MPT_RESERVED, &mpt);
2195 			if (err)
2196 				return err;
2197 
2198 			__mlx4_mpt_free_icm(dev, mpt->key);
2199 			res_end_move(dev, slave, RES_MPT, id);
2200 			return err;
2201 		break;
2202 	default:
2203 		err = -EINVAL;
2204 		break;
2205 	}
2206 	return err;
2207 }
2208 
2209 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2210 		       u64 in_param, u64 *out_param)
2211 {
2212 	int cqn;
2213 	int err;
2214 
2215 	switch (op) {
2216 	case RES_OP_RESERVE_AND_MAP:
2217 		cqn = get_param_l(&in_param);
2218 		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2219 		if (err)
2220 			break;
2221 
2222 		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2223 		__mlx4_cq_free_icm(dev, cqn);
2224 		break;
2225 
2226 	default:
2227 		err = -EINVAL;
2228 		break;
2229 	}
2230 
2231 	return err;
2232 }
2233 
2234 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2235 			u64 in_param, u64 *out_param)
2236 {
2237 	int srqn;
2238 	int err;
2239 
2240 	switch (op) {
2241 	case RES_OP_RESERVE_AND_MAP:
2242 		srqn = get_param_l(&in_param);
2243 		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2244 		if (err)
2245 			break;
2246 
2247 		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2248 		__mlx4_srq_free_icm(dev, srqn);
2249 		break;
2250 
2251 	default:
2252 		err = -EINVAL;
2253 		break;
2254 	}
2255 
2256 	return err;
2257 }
2258 
2259 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2260 			    u64 in_param, u64 *out_param, int in_port)
2261 {
2262 	int port;
2263 	int err = 0;
2264 
2265 	switch (op) {
2266 	case RES_OP_RESERVE_AND_MAP:
2267 		port = !in_port ? get_param_l(out_param) : in_port;
2268 		port = mlx4_slave_convert_port(
2269 				dev, slave, port);
2270 
2271 		if (port < 0)
2272 			return -EINVAL;
2273 		mac_del_from_slave(dev, slave, in_param, port);
2274 		__mlx4_unregister_mac(dev, port, in_param);
2275 		break;
2276 	default:
2277 		err = -EINVAL;
2278 		break;
2279 	}
2280 
2281 	return err;
2282 
2283 }
2284 
2285 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2286 			    u64 in_param, u64 *out_param, int port)
2287 {
2288 	struct mlx4_priv *priv = mlx4_priv(dev);
2289 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2290 	int err = 0;
2291 
2292 	port = mlx4_slave_convert_port(
2293 			dev, slave, port);
2294 
2295 	if (port < 0)
2296 		return -EINVAL;
2297 	switch (op) {
2298 	case RES_OP_RESERVE_AND_MAP:
2299 		if (slave_state[slave].old_vlan_api)
2300 			return 0;
2301 		if (!port)
2302 			return -EINVAL;
2303 		vlan_del_from_slave(dev, slave, in_param, port);
2304 		__mlx4_unregister_vlan(dev, port, in_param);
2305 		break;
2306 	default:
2307 		err = -EINVAL;
2308 		break;
2309 	}
2310 
2311 	return err;
2312 }
2313 
2314 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2315 			    u64 in_param, u64 *out_param)
2316 {
2317 	int index;
2318 	int err;
2319 
2320 	if (op != RES_OP_RESERVE)
2321 		return -EINVAL;
2322 
2323 	index = get_param_l(&in_param);
2324 	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2325 	if (err)
2326 		return err;
2327 
2328 	__mlx4_counter_free(dev, index);
2329 	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2330 
2331 	return err;
2332 }
2333 
2334 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2335 			  u64 in_param, u64 *out_param)
2336 {
2337 	int xrcdn;
2338 	int err;
2339 
2340 	if (op != RES_OP_RESERVE)
2341 		return -EINVAL;
2342 
2343 	xrcdn = get_param_l(&in_param);
2344 	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2345 	if (err)
2346 		return err;
2347 
2348 	__mlx4_xrcd_free(dev, xrcdn);
2349 
2350 	return err;
2351 }
2352 
2353 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2354 			  struct mlx4_vhcr *vhcr,
2355 			  struct mlx4_cmd_mailbox *inbox,
2356 			  struct mlx4_cmd_mailbox *outbox,
2357 			  struct mlx4_cmd_info *cmd)
2358 {
2359 	int err = -EINVAL;
2360 	int alop = vhcr->op_modifier;
2361 
2362 	switch (vhcr->in_modifier & 0xFF) {
2363 	case RES_QP:
2364 		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2365 				  vhcr->in_param);
2366 		break;
2367 
2368 	case RES_MTT:
2369 		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2370 				   vhcr->in_param, &vhcr->out_param);
2371 		break;
2372 
2373 	case RES_MPT:
2374 		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2375 				   vhcr->in_param);
2376 		break;
2377 
2378 	case RES_CQ:
2379 		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2380 				  vhcr->in_param, &vhcr->out_param);
2381 		break;
2382 
2383 	case RES_SRQ:
2384 		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2385 				   vhcr->in_param, &vhcr->out_param);
2386 		break;
2387 
2388 	case RES_MAC:
2389 		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2390 				   vhcr->in_param, &vhcr->out_param,
2391 				   (vhcr->in_modifier >> 8) & 0xFF);
2392 		break;
2393 
2394 	case RES_VLAN:
2395 		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2396 				    vhcr->in_param, &vhcr->out_param,
2397 				    (vhcr->in_modifier >> 8) & 0xFF);
2398 		break;
2399 
2400 	case RES_COUNTER:
2401 		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2402 				       vhcr->in_param, &vhcr->out_param);
2403 		break;
2404 
2405 	case RES_XRCD:
2406 		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2407 				     vhcr->in_param, &vhcr->out_param);
2408 
2409 	default:
2410 		break;
2411 	}
2412 	return err;
2413 }
2414 
2415 /* ugly but other choices are uglier */
2416 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2417 {
2418 	return (be32_to_cpu(mpt->flags) >> 9) & 1;
2419 }
2420 
2421 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2422 {
2423 	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2424 }
2425 
2426 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2427 {
2428 	return be32_to_cpu(mpt->mtt_sz);
2429 }
2430 
2431 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2432 {
2433 	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2434 }
2435 
2436 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2437 {
2438 	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2439 }
2440 
2441 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2442 {
2443 	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2444 }
2445 
2446 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2447 {
2448 	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2449 }
2450 
2451 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2452 {
2453 	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2454 }
2455 
2456 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2457 {
2458 	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2459 }
2460 
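/* Number of MTT pages a QP needs, derived from the SQ/RQ sizes, strides and
 * page shift in the QP context (the RQ is absent for SRQ, RSS and XRC QPs).
 */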
2461 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2462 {
2463 	int page_shift = (qpc->log_page_size & 0x3f) + 12;
2464 	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2465 	int log_sq_stride = qpc->sq_size_stride & 7;
2466 	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2467 	int log_rq_stride = qpc->rq_size_stride & 7;
2468 	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2469 	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2470 	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2471 	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2472 	int sq_size;
2473 	int rq_size;
2474 	int total_pages;
2475 	int total_mem;
2476 	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2477 
2478 	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2479 	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2480 	total_mem = sq_size + rq_size;
2481 	total_pages =
2482 		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2483 				   page_shift);
2484 
2485 	return total_pages;
2486 }
2487 
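/* Verify that the range [start, start + size) lies entirely within the MTT
 * chunk owned by the slave.
 */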
2488 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2489 			   int size, struct res_mtt *mtt)
2490 {
2491 	int res_start = mtt->com.res_id;
2492 	int res_size = (1 << mtt->order);
2493 
2494 	if (start < res_start || start + size > res_start + res_size)
2495 		return -EPERM;
2496 	return 0;
2497 }
2498 
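/* SW2HW_MPT wrapper: validates the MPT entry a slave is moving to HW
 * ownership (no memory windows, no foreign PD, no FMR with bind enabled) and
 * pins the referenced MTT range before passing the command to firmware.
 */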
2499 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2500 			   struct mlx4_vhcr *vhcr,
2501 			   struct mlx4_cmd_mailbox *inbox,
2502 			   struct mlx4_cmd_mailbox *outbox,
2503 			   struct mlx4_cmd_info *cmd)
2504 {
2505 	int err;
2506 	int index = vhcr->in_modifier;
2507 	struct res_mtt *mtt;
2508 	struct res_mpt *mpt;
2509 	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2510 	int phys;
2511 	int id;
2512 	u32 pd;
2513 	int pd_slave;
2514 
2515 	id = index & mpt_mask(dev);
2516 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2517 	if (err)
2518 		return err;
2519 
2520 	/* Disable memory windows for VFs. */
2521 	if (!mr_is_region(inbox->buf)) {
2522 		err = -EPERM;
2523 		goto ex_abort;
2524 	}
2525 
2526 	/* Make sure that the PD bits related to the slave id are zeros. */
2527 	pd = mr_get_pd(inbox->buf);
2528 	pd_slave = (pd >> 17) & 0x7f;
2529 	if (pd_slave != 0 && pd_slave != slave) {
2530 		err = -EPERM;
2531 		goto ex_abort;
2532 	}
2533 
2534 	if (mr_is_fmr(inbox->buf)) {
2535 		/* FMR and Bind Enable are forbidden in slave devices. */
2536 		if (mr_is_bind_enabled(inbox->buf)) {
2537 			err = -EPERM;
2538 			goto ex_abort;
2539 		}
2540 		/* FMR and Memory Windows are also forbidden. */
2541 		if (!mr_is_region(inbox->buf)) {
2542 			err = -EPERM;
2543 			goto ex_abort;
2544 		}
2545 	}
2546 
2547 	phys = mr_phys_mpt(inbox->buf);
2548 	if (!phys) {
2549 		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2550 		if (err)
2551 			goto ex_abort;
2552 
2553 		err = check_mtt_range(dev, slave, mtt_base,
2554 				      mr_get_mtt_size(inbox->buf), mtt);
2555 		if (err)
2556 			goto ex_put;
2557 
2558 		mpt->mtt = mtt;
2559 	}
2560 
2561 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2562 	if (err)
2563 		goto ex_put;
2564 
2565 	if (!phys) {
2566 		atomic_inc(&mtt->ref_count);
2567 		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2568 	}
2569 
2570 	res_end_move(dev, slave, RES_MPT, id);
2571 	return 0;
2572 
2573 ex_put:
2574 	if (!phys)
2575 		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2576 ex_abort:
2577 	res_abort_move(dev, slave, RES_MPT, id);
2578 
2579 	return err;
2580 }
2581 
2582 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2583 			   struct mlx4_vhcr *vhcr,
2584 			   struct mlx4_cmd_mailbox *inbox,
2585 			   struct mlx4_cmd_mailbox *outbox,
2586 			   struct mlx4_cmd_info *cmd)
2587 {
2588 	int err;
2589 	int index = vhcr->in_modifier;
2590 	struct res_mpt *mpt;
2591 	int id;
2592 
2593 	id = index & mpt_mask(dev);
2594 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2595 	if (err)
2596 		return err;
2597 
2598 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2599 	if (err)
2600 		goto ex_abort;
2601 
2602 	if (mpt->mtt)
2603 		atomic_dec(&mpt->mtt->ref_count);
2604 
2605 	res_end_move(dev, slave, RES_MPT, id);
2606 	return 0;
2607 
2608 ex_abort:
2609 	res_abort_move(dev, slave, RES_MPT, id);
2610 
2611 	return err;
2612 }
2613 
2614 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2615 			   struct mlx4_vhcr *vhcr,
2616 			   struct mlx4_cmd_mailbox *inbox,
2617 			   struct mlx4_cmd_mailbox *outbox,
2618 			   struct mlx4_cmd_info *cmd)
2619 {
2620 	int err;
2621 	int index = vhcr->in_modifier;
2622 	struct res_mpt *mpt;
2623 	int id;
2624 
2625 	id = index & mpt_mask(dev);
2626 	err = get_res(dev, slave, id, RES_MPT, &mpt);
2627 	if (err)
2628 		return err;
2629 
2630 	if (mpt->com.from_state == RES_MPT_MAPPED) {
2631 		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2632 		 * that, the VF must read the MPT. But since the MPT entry memory is not
2633 		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2634 		 * entry contents. To guarantee that the MPT cannot be changed, the driver
2635 		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2636 		 * ownership following the change. The change here allows the VF to
2637 		 * perform QUERY_MPT also when the entry is in SW ownership.
2638 		 */
2639 		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2640 					&mlx4_priv(dev)->mr_table.dmpt_table,
2641 					mpt->key, NULL);
2642 
2643 		if (NULL == mpt_entry || NULL == outbox->buf) {
2644 			err = -EINVAL;
2645 			goto out;
2646 		}
2647 
2648 		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2649 
2650 		err = 0;
2651 	} else if (mpt->com.from_state == RES_MPT_HW) {
2652 		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2653 	} else {
2654 		err = -EBUSY;
2655 		goto out;
2656 	}
2657 
2658 
2659 out:
2660 	put_res(dev, slave, id, RES_MPT);
2661 	return err;
2662 }
2663 
2664 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2665 {
2666 	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2667 }
2668 
2669 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2670 {
2671 	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2672 }
2673 
2674 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2675 {
2676 	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2677 }
2678 
2679 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2680 				  struct mlx4_qp_context *context)
2681 {
2682 	u32 qpn = vhcr->in_modifier & 0xffffff;
2683 	u32 qkey = 0;
2684 
2685 	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2686 		return;
2687 
2688 	/* adjust qkey in qp context */
2689 	context->qkey = cpu_to_be32(qkey);
2690 }
2691 
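/* RST2INIT wrapper: moves the QP to HW ownership, validates and takes
 * references on the MTT, CQs and optional SRQ named in the QP context, and
 * adjusts the proxy/tunnel qkey and pkey index before invoking firmware.
 */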
2692 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2693 			     struct mlx4_vhcr *vhcr,
2694 			     struct mlx4_cmd_mailbox *inbox,
2695 			     struct mlx4_cmd_mailbox *outbox,
2696 			     struct mlx4_cmd_info *cmd)
2697 {
2698 	int err;
2699 	int qpn = vhcr->in_modifier & 0x7fffff;
2700 	struct res_mtt *mtt;
2701 	struct res_qp *qp;
2702 	struct mlx4_qp_context *qpc = inbox->buf + 8;
2703 	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2704 	int mtt_size = qp_get_mtt_size(qpc);
2705 	struct res_cq *rcq;
2706 	struct res_cq *scq;
2707 	int rcqn = qp_get_rcqn(qpc);
2708 	int scqn = qp_get_scqn(qpc);
2709 	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2710 	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2711 	struct res_srq *srq;
2712 	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2713 
2714 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2715 	if (err)
2716 		return err;
2717 	qp->local_qpn = local_qpn;
2718 	qp->sched_queue = 0;
2719 	qp->param3 = 0;
2720 	qp->vlan_control = 0;
2721 	qp->fvl_rx = 0;
2722 	qp->pri_path_fl = 0;
2723 	qp->vlan_index = 0;
2724 	qp->feup = 0;
2725 	qp->qpc_flags = be32_to_cpu(qpc->flags);
2726 
2727 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2728 	if (err)
2729 		goto ex_abort;
2730 
2731 	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2732 	if (err)
2733 		goto ex_put_mtt;
2734 
2735 	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2736 	if (err)
2737 		goto ex_put_mtt;
2738 
2739 	if (scqn != rcqn) {
2740 		err = get_res(dev, slave, scqn, RES_CQ, &scq);
2741 		if (err)
2742 			goto ex_put_rcq;
2743 	} else
2744 		scq = rcq;
2745 
2746 	if (use_srq) {
2747 		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2748 		if (err)
2749 			goto ex_put_scq;
2750 	}
2751 
2752 	adjust_proxy_tun_qkey(dev, vhcr, qpc);
2753 	update_pkey_index(dev, slave, inbox);
2754 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2755 	if (err)
2756 		goto ex_put_srq;
2757 	atomic_inc(&mtt->ref_count);
2758 	qp->mtt = mtt;
2759 	atomic_inc(&rcq->ref_count);
2760 	qp->rcq = rcq;
2761 	atomic_inc(&scq->ref_count);
2762 	qp->scq = scq;
2763 
2764 	if (scqn != rcqn)
2765 		put_res(dev, slave, scqn, RES_CQ);
2766 
2767 	if (use_srq) {
2768 		atomic_inc(&srq->ref_count);
2769 		put_res(dev, slave, srqn, RES_SRQ);
2770 		qp->srq = srq;
2771 	}
2772 
2773 	/* Save param3 for dynamic changes from VST back to VGT */
2774 	qp->param3 = qpc->param3;
2775 	put_res(dev, slave, rcqn, RES_CQ);
2776 	put_res(dev, slave, mtt_base, RES_MTT);
2777 	res_end_move(dev, slave, RES_QP, qpn);
2778 
2779 	return 0;
2780 
2781 ex_put_srq:
2782 	if (use_srq)
2783 		put_res(dev, slave, srqn, RES_SRQ);
2784 ex_put_scq:
2785 	if (scqn != rcqn)
2786 		put_res(dev, slave, scqn, RES_CQ);
2787 ex_put_rcq:
2788 	put_res(dev, slave, rcqn, RES_CQ);
2789 ex_put_mtt:
2790 	put_res(dev, slave, mtt_base, RES_MTT);
2791 ex_abort:
2792 	res_abort_move(dev, slave, RES_QP, qpn);
2793 
2794 	return err;
2795 }
2796 
2797 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2798 {
2799 	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2800 }
2801 
2802 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2803 {
2804 	int log_eq_size = eqc->log_eq_size & 0x1f;
2805 	int page_shift = (eqc->log_page_size & 0x3f) + 12;
2806 
2807 	if (log_eq_size + 5 < page_shift)
2808 		return 1;
2809 
2810 	return 1 << (log_eq_size + 5 - page_shift);
2811 }
2812 
2813 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2814 {
2815 	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2816 }
2817 
2818 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2819 {
2820 	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2821 	int page_shift = (cqc->log_page_size & 0x3f) + 12;
2822 
2823 	if (log_cq_size + 5 < page_shift)
2824 		return 1;
2825 
2826 	return 1 << (log_cq_size + 5 - page_shift);
2827 }
2828 
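/* SW2HW_EQ wrapper.  EQs are tracked per slave with res_id = (slave << 8) |
 * eqn; the referenced MTT range is validated and pinned before the command is
 * forwarded to firmware.
 */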
2829 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2830 			  struct mlx4_vhcr *vhcr,
2831 			  struct mlx4_cmd_mailbox *inbox,
2832 			  struct mlx4_cmd_mailbox *outbox,
2833 			  struct mlx4_cmd_info *cmd)
2834 {
2835 	int err;
2836 	int eqn = vhcr->in_modifier;
2837 	int res_id = (slave << 8) | eqn;
2838 	struct mlx4_eq_context *eqc = inbox->buf;
2839 	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2840 	int mtt_size = eq_get_mtt_size(eqc);
2841 	struct res_eq *eq;
2842 	struct res_mtt *mtt;
2843 
2844 	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2845 	if (err)
2846 		return err;
2847 	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2848 	if (err)
2849 		goto out_add;
2850 
2851 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2852 	if (err)
2853 		goto out_move;
2854 
2855 	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2856 	if (err)
2857 		goto out_put;
2858 
2859 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2860 	if (err)
2861 		goto out_put;
2862 
2863 	atomic_inc(&mtt->ref_count);
2864 	eq->mtt = mtt;
2865 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2866 	res_end_move(dev, slave, RES_EQ, res_id);
2867 	return 0;
2868 
2869 out_put:
2870 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2871 out_move:
2872 	res_abort_move(dev, slave, RES_EQ, res_id);
2873 out_add:
2874 	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2875 	return err;
2876 }
2877 
2878 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2879 			      int len, struct res_mtt **res)
2880 {
2881 	struct mlx4_priv *priv = mlx4_priv(dev);
2882 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2883 	struct res_mtt *mtt;
2884 	int err = -EINVAL;
2885 
2886 	spin_lock_irq(mlx4_tlock(dev));
2887 	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2888 			    com.list) {
2889 		if (!check_mtt_range(dev, slave, start, len, mtt)) {
2890 			*res = mtt;
2891 			mtt->com.from_state = mtt->com.state;
2892 			mtt->com.state = RES_MTT_BUSY;
2893 			err = 0;
2894 			break;
2895 		}
2896 	}
2897 	spin_unlock_irq(mlx4_tlock(dev));
2898 
2899 	return err;
2900 }
2901 
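/* Sanity-check the QP context a slave supplies for a state transition:
 * MGID indexes must fall within the slave's GID range, and only VFs with SMI
 * enabled may bring up MLX proxy (special) QPs.
 */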
2902 static int verify_qp_parameters(struct mlx4_dev *dev,
2903 				struct mlx4_vhcr *vhcr,
2904 				struct mlx4_cmd_mailbox *inbox,
2905 				enum qp_transition transition, u8 slave)
2906 {
2907 	u32			qp_type;
2908 	u32			qpn;
2909 	struct mlx4_qp_context	*qp_ctx;
2910 	enum mlx4_qp_optpar	optpar;
2911 	int port;
2912 	int num_gids;
2913 
2914 	qp_ctx  = inbox->buf + 8;
2915 	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2916 	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);
2917 
2918 	switch (qp_type) {
2919 	case MLX4_QP_ST_RC:
2920 	case MLX4_QP_ST_XRC:
2921 	case MLX4_QP_ST_UC:
2922 		switch (transition) {
2923 		case QP_TRANS_INIT2RTR:
2924 		case QP_TRANS_RTR2RTS:
2925 		case QP_TRANS_RTS2RTS:
2926 		case QP_TRANS_SQD2SQD:
2927 		case QP_TRANS_SQD2RTS:
2928 			if (slave != mlx4_master_func_num(dev)) {
2929 				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2930 					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2931 					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2932 						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2933 					else
2934 						num_gids = 1;
2935 					if (qp_ctx->pri_path.mgid_index >= num_gids)
2936 						return -EINVAL;
2937 				}
2938 				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2939 					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2940 					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2941 						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2942 					else
2943 						num_gids = 1;
2944 					if (qp_ctx->alt_path.mgid_index >= num_gids)
2945 						return -EINVAL;
2946 				}
2947 			}
2948 			break;
2949 		default:
2950 			break;
2951 		}
2952 		break;
2953 
2954 	case MLX4_QP_ST_MLX:
2955 		qpn = vhcr->in_modifier & 0x7fffff;
2956 		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2957 		if (transition == QP_TRANS_INIT2RTR &&
2958 		    slave != mlx4_master_func_num(dev) &&
2959 		    mlx4_is_qp_reserved(dev, qpn) &&
2960 		    !mlx4_vf_smi_enabled(dev, slave, port)) {
2961 			/* only enabled VFs may create MLX proxy QPs */
2962 			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2963 				 __func__, slave, port);
2964 			return -EPERM;
2965 		}
2966 		break;
2967 
2968 	default:
2969 		break;
2970 	}
2971 
2972 	return 0;
2973 }
2974 
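/* WRITE_MTT wrapper: locates the tracked MTT chunk containing the target
 * range and performs the write through the SW implementation, converting the
 * inbox page list to host-endian addresses in place.
 */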
2975 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2976 			   struct mlx4_vhcr *vhcr,
2977 			   struct mlx4_cmd_mailbox *inbox,
2978 			   struct mlx4_cmd_mailbox *outbox,
2979 			   struct mlx4_cmd_info *cmd)
2980 {
2981 	struct mlx4_mtt mtt;
2982 	__be64 *page_list = inbox->buf;
2983 	u64 *pg_list = (u64 *)page_list;
2984 	int i;
2985 	struct res_mtt *rmtt = NULL;
2986 	int start = be64_to_cpu(page_list[0]);
2987 	int npages = vhcr->in_modifier;
2988 	int err;
2989 
2990 	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2991 	if (err)
2992 		return err;
2993 
2994 	/* Call the SW implementation of write_mtt:
2995 	 * - Prepare a dummy mtt struct
2996 	 * - Translate inbox contents to simple addresses in host endianness */
2997 	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2998 			    we don't really use it */
2999 	mtt.order = 0;
3000 	mtt.page_shift = 0;
3001 	for (i = 0; i < npages; ++i)
3002 		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3003 
3004 	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3005 			       ((u64 *)page_list + 2));
3006 
3007 	if (rmtt)
3008 		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3009 
3010 	return err;
3011 }
3012 
3013 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3014 			  struct mlx4_vhcr *vhcr,
3015 			  struct mlx4_cmd_mailbox *inbox,
3016 			  struct mlx4_cmd_mailbox *outbox,
3017 			  struct mlx4_cmd_info *cmd)
3018 {
3019 	int eqn = vhcr->in_modifier;
3020 	int res_id = eqn | (slave << 8);
3021 	struct res_eq *eq;
3022 	int err;
3023 
3024 	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3025 	if (err)
3026 		return err;
3027 
3028 	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3029 	if (err)
3030 		goto ex_abort;
3031 
3032 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3033 	if (err)
3034 		goto ex_put;
3035 
3036 	atomic_dec(&eq->mtt->ref_count);
3037 	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3038 	res_end_move(dev, slave, RES_EQ, res_id);
3039 	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3040 
3041 	return 0;
3042 
3043 ex_put:
3044 	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3045 ex_abort:
3046 	res_abort_move(dev, slave, RES_EQ, res_id);
3047 
3048 	return err;
3049 }
3050 
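/* Generate an EQE on behalf of a slave.  The event is delivered only if the
 * slave registered an EQ for this event type; the slave's EQ must be in HW
 * ownership while the GEN_EQE command runs.
 */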
3051 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3052 {
3053 	struct mlx4_priv *priv = mlx4_priv(dev);
3054 	struct mlx4_slave_event_eq_info *event_eq;
3055 	struct mlx4_cmd_mailbox *mailbox;
3056 	u32 in_modifier = 0;
3057 	int err;
3058 	int res_id;
3059 	struct res_eq *req;
3060 
3061 	if (!priv->mfunc.master.slave_state)
3062 		return -EINVAL;
3063 
3064 	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3065 
3066 	/* Create the event only if the slave is registered */
3067 	if (event_eq->eqn < 0)
3068 		return 0;
3069 
3070 	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3071 	res_id = (slave << 8) | event_eq->eqn;
3072 	err = get_res(dev, slave, res_id, RES_EQ, &req);
3073 	if (err)
3074 		goto unlock;
3075 
3076 	if (req->com.from_state != RES_EQ_HW) {
3077 		err = -EINVAL;
3078 		goto put;
3079 	}
3080 
3081 	mailbox = mlx4_alloc_cmd_mailbox(dev);
3082 	if (IS_ERR(mailbox)) {
3083 		err = PTR_ERR(mailbox);
3084 		goto put;
3085 	}
3086 
3087 	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3088 		++event_eq->token;
3089 		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3090 	}
3091 
3092 	memcpy(mailbox->buf, (u8 *) eqe, 28);
3093 
3094 	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3095 
3096 	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3097 		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3098 		       MLX4_CMD_NATIVE);
3099 
3100 	put_res(dev, slave, res_id, RES_EQ);
3101 	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3102 	mlx4_free_cmd_mailbox(dev, mailbox);
3103 	return err;
3104 
3105 put:
3106 	put_res(dev, slave, res_id, RES_EQ);
3107 
3108 unlock:
3109 	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3110 	return err;
3111 }
3112 
3113 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3114 			  struct mlx4_vhcr *vhcr,
3115 			  struct mlx4_cmd_mailbox *inbox,
3116 			  struct mlx4_cmd_mailbox *outbox,
3117 			  struct mlx4_cmd_info *cmd)
3118 {
3119 	int eqn = vhcr->in_modifier;
3120 	int res_id = eqn | (slave << 8);
3121 	struct res_eq *eq;
3122 	int err;
3123 
3124 	err = get_res(dev, slave, res_id, RES_EQ, &eq);
3125 	if (err)
3126 		return err;
3127 
3128 	if (eq->com.from_state != RES_EQ_HW) {
3129 		err = -EINVAL;
3130 		goto ex_put;
3131 	}
3132 
3133 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3134 
3135 ex_put:
3136 	put_res(dev, slave, res_id, RES_EQ);
3137 	return err;
3138 }
3139 
3140 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3141 			  struct mlx4_vhcr *vhcr,
3142 			  struct mlx4_cmd_mailbox *inbox,
3143 			  struct mlx4_cmd_mailbox *outbox,
3144 			  struct mlx4_cmd_info *cmd)
3145 {
3146 	int err;
3147 	int cqn = vhcr->in_modifier;
3148 	struct mlx4_cq_context *cqc = inbox->buf;
3149 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3150 	struct res_cq *cq;
3151 	struct res_mtt *mtt;
3152 
3153 	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3154 	if (err)
3155 		return err;
3156 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3157 	if (err)
3158 		goto out_move;
3159 	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3160 	if (err)
3161 		goto out_put;
3162 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3163 	if (err)
3164 		goto out_put;
3165 	atomic_inc(&mtt->ref_count);
3166 	cq->mtt = mtt;
3167 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3168 	res_end_move(dev, slave, RES_CQ, cqn);
3169 	return 0;
3170 
3171 out_put:
3172 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3173 out_move:
3174 	res_abort_move(dev, slave, RES_CQ, cqn);
3175 	return err;
3176 }
3177 
3178 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3179 			  struct mlx4_vhcr *vhcr,
3180 			  struct mlx4_cmd_mailbox *inbox,
3181 			  struct mlx4_cmd_mailbox *outbox,
3182 			  struct mlx4_cmd_info *cmd)
3183 {
3184 	int err;
3185 	int cqn = vhcr->in_modifier;
3186 	struct res_cq *cq;
3187 
3188 	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3189 	if (err)
3190 		return err;
3191 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3192 	if (err)
3193 		goto out_move;
3194 	atomic_dec(&cq->mtt->ref_count);
3195 	res_end_move(dev, slave, RES_CQ, cqn);
3196 	return 0;
3197 
3198 out_move:
3199 	res_abort_move(dev, slave, RES_CQ, cqn);
3200 	return err;
3201 }
3202 
3203 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3204 			  struct mlx4_vhcr *vhcr,
3205 			  struct mlx4_cmd_mailbox *inbox,
3206 			  struct mlx4_cmd_mailbox *outbox,
3207 			  struct mlx4_cmd_info *cmd)
3208 {
3209 	int cqn = vhcr->in_modifier;
3210 	struct res_cq *cq;
3211 	int err;
3212 
3213 	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3214 	if (err)
3215 		return err;
3216 
3217 	if (cq->com.from_state != RES_CQ_HW)
3218 		goto ex_put;
3219 
3220 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3221 ex_put:
3222 	put_res(dev, slave, cqn, RES_CQ);
3223 
3224 	return err;
3225 }
3226 
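/* CQ resize (MODIFY_CQ with op_modifier 0): swap the CQ's tracked MTT
 * reference from the original chunk to the one described in the new CQ
 * context, after range-checking it.
 */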
3227 static int handle_resize(struct mlx4_dev *dev, int slave,
3228 			 struct mlx4_vhcr *vhcr,
3229 			 struct mlx4_cmd_mailbox *inbox,
3230 			 struct mlx4_cmd_mailbox *outbox,
3231 			 struct mlx4_cmd_info *cmd,
3232 			 struct res_cq *cq)
3233 {
3234 	int err;
3235 	struct res_mtt *orig_mtt;
3236 	struct res_mtt *mtt;
3237 	struct mlx4_cq_context *cqc = inbox->buf;
3238 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3239 
3240 	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3241 	if (err)
3242 		return err;
3243 
3244 	if (orig_mtt != cq->mtt) {
3245 		err = -EINVAL;
3246 		goto ex_put;
3247 	}
3248 
3249 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3250 	if (err)
3251 		goto ex_put;
3252 
3253 	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3254 	if (err)
3255 		goto ex_put1;
3256 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3257 	if (err)
3258 		goto ex_put1;
3259 	atomic_dec(&orig_mtt->ref_count);
3260 	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3261 	atomic_inc(&mtt->ref_count);
3262 	cq->mtt = mtt;
3263 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3264 	return 0;
3265 
3266 ex_put1:
3267 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3268 ex_put:
3269 	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3270 
3271 	return err;
3272 
3273 }
3274 
3275 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3276 			   struct mlx4_vhcr *vhcr,
3277 			   struct mlx4_cmd_mailbox *inbox,
3278 			   struct mlx4_cmd_mailbox *outbox,
3279 			   struct mlx4_cmd_info *cmd)
3280 {
3281 	int cqn = vhcr->in_modifier;
3282 	struct res_cq *cq;
3283 	int err;
3284 
3285 	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3286 	if (err)
3287 		return err;
3288 
3289 	if (cq->com.from_state != RES_CQ_HW)
3290 		goto ex_put;
3291 
3292 	if (vhcr->op_modifier == 0) {
3293 		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3294 		goto ex_put;
3295 	}
3296 
3297 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3298 ex_put:
3299 	put_res(dev, slave, cqn, RES_CQ);
3300 
3301 	return err;
3302 }
3303 
3304 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3305 {
3306 	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3307 	int log_rq_stride = srqc->logstride & 7;
3308 	int page_shift = (srqc->log_page_size & 0x3f) + 12;
3309 
3310 	if (log_srq_size + log_rq_stride + 4 < page_shift)
3311 		return 1;
3312 
3313 	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3314 }
3315 
3316 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3317 			   struct mlx4_vhcr *vhcr,
3318 			   struct mlx4_cmd_mailbox *inbox,
3319 			   struct mlx4_cmd_mailbox *outbox,
3320 			   struct mlx4_cmd_info *cmd)
3321 {
3322 	int err;
3323 	int srqn = vhcr->in_modifier;
3324 	struct res_mtt *mtt;
3325 	struct res_srq *srq;
3326 	struct mlx4_srq_context *srqc = inbox->buf;
3327 	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3328 
3329 	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3330 		return -EINVAL;
3331 
3332 	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3333 	if (err)
3334 		return err;
3335 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3336 	if (err)
3337 		goto ex_abort;
3338 	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3339 			      mtt);
3340 	if (err)
3341 		goto ex_put_mtt;
3342 
3343 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3344 	if (err)
3345 		goto ex_put_mtt;
3346 
3347 	atomic_inc(&mtt->ref_count);
3348 	srq->mtt = mtt;
3349 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3350 	res_end_move(dev, slave, RES_SRQ, srqn);
3351 	return 0;
3352 
3353 ex_put_mtt:
3354 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3355 ex_abort:
3356 	res_abort_move(dev, slave, RES_SRQ, srqn);
3357 
3358 	return err;
3359 }
3360 
3361 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3362 			   struct mlx4_vhcr *vhcr,
3363 			   struct mlx4_cmd_mailbox *inbox,
3364 			   struct mlx4_cmd_mailbox *outbox,
3365 			   struct mlx4_cmd_info *cmd)
3366 {
3367 	int err;
3368 	int srqn = vhcr->in_modifier;
3369 	struct res_srq *srq;
3370 
3371 	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3372 	if (err)
3373 		return err;
3374 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3375 	if (err)
3376 		goto ex_abort;
3377 	atomic_dec(&srq->mtt->ref_count);
3378 	if (srq->cq)
3379 		atomic_dec(&srq->cq->ref_count);
3380 	res_end_move(dev, slave, RES_SRQ, srqn);
3381 
3382 	return 0;
3383 
3384 ex_abort:
3385 	res_abort_move(dev, slave, RES_SRQ, srqn);
3386 
3387 	return err;
3388 }
3389 
3390 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3391 			   struct mlx4_vhcr *vhcr,
3392 			   struct mlx4_cmd_mailbox *inbox,
3393 			   struct mlx4_cmd_mailbox *outbox,
3394 			   struct mlx4_cmd_info *cmd)
3395 {
3396 	int err;
3397 	int srqn = vhcr->in_modifier;
3398 	struct res_srq *srq;
3399 
3400 	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3401 	if (err)
3402 		return err;
3403 	if (srq->com.from_state != RES_SRQ_HW) {
3404 		err = -EBUSY;
3405 		goto out;
3406 	}
3407 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3408 out:
3409 	put_res(dev, slave, srqn, RES_SRQ);
3410 	return err;
3411 }
3412 
3413 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3414 			 struct mlx4_vhcr *vhcr,
3415 			 struct mlx4_cmd_mailbox *inbox,
3416 			 struct mlx4_cmd_mailbox *outbox,
3417 			 struct mlx4_cmd_info *cmd)
3418 {
3419 	int err;
3420 	int srqn = vhcr->in_modifier;
3421 	struct res_srq *srq;
3422 
3423 	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3424 	if (err)
3425 		return err;
3426 
3427 	if (srq->com.from_state != RES_SRQ_HW) {
3428 		err = -EBUSY;
3429 		goto out;
3430 	}
3431 
3432 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3433 out:
3434 	put_res(dev, slave, srqn, RES_SRQ);
3435 	return err;
3436 }
3437 
3438 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3439 			struct mlx4_vhcr *vhcr,
3440 			struct mlx4_cmd_mailbox *inbox,
3441 			struct mlx4_cmd_mailbox *outbox,
3442 			struct mlx4_cmd_info *cmd)
3443 {
3444 	int err;
3445 	int qpn = vhcr->in_modifier & 0x7fffff;
3446 	struct res_qp *qp;
3447 
3448 	err = get_res(dev, slave, qpn, RES_QP, &qp);
3449 	if (err)
3450 		return err;
3451 	if (qp->com.from_state != RES_QP_HW) {
3452 		err = -EBUSY;
3453 		goto out;
3454 	}
3455 
3456 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3457 out:
3458 	put_res(dev, slave, qpn, RES_QP);
3459 	return err;
3460 }
3461 
3462 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3463 			      struct mlx4_vhcr *vhcr,
3464 			      struct mlx4_cmd_mailbox *inbox,
3465 			      struct mlx4_cmd_mailbox *outbox,
3466 			      struct mlx4_cmd_info *cmd)
3467 {
3468 	struct mlx4_qp_context *context = inbox->buf + 8;
3469 	adjust_proxy_tun_qkey(dev, vhcr, context);
3470 	update_pkey_index(dev, slave, inbox);
3471 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3472 }
3473 
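/* Rewrite the port bit (bit 6) of the primary and alternate sched_queue
 * fields so they refer to the slave's real physical port.
 */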
3474 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3475 				  struct mlx4_qp_context *qpc,
3476 				  struct mlx4_cmd_mailbox *inbox)
3477 {
3478 	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3479 	u8 pri_sched_queue;
3480 	int port = mlx4_slave_convert_port(
3481 		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3482 
3483 	if (port < 0)
3484 		return -EINVAL;
3485 
3486 	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3487 			  ((port & 1) << 6);
3488 
3489 	if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3490 	    mlx4_is_eth(dev, port + 1)) {
3491 		qpc->pri_path.sched_queue = pri_sched_queue;
3492 	}
3493 
3494 	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3495 		port = mlx4_slave_convert_port(
3496 				dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3497 				+ 1) - 1;
3498 		if (port < 0)
3499 			return -EINVAL;
3500 		qpc->alt_path.sched_queue =
3501 			(qpc->alt_path.sched_queue & ~(1 << 6)) |
3502 			(port & 1) << 6;
3503 	}
3504 	return 0;
3505 }
3506 
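/* For RoCE (Ethernet) QPs, make sure the SMAC index in the QP context maps
 * to a MAC the slave actually registered.
 */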
3507 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3508 				struct mlx4_qp_context *qpc,
3509 				struct mlx4_cmd_mailbox *inbox)
3510 {
3511 	u64 mac;
3512 	int port;
3513 	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3514 	u8 sched = *(u8 *)(inbox->buf + 64);
3515 	u8 smac_ix;
3516 
3517 	port = (sched >> 6 & 1) + 1;
3518 	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3519 		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3520 		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3521 			return -ENOENT;
3522 	}
3523 	return 0;
3524 }
3525 
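/* INIT2RTR wrapper: validates the QP context (sched_queue port, transition
 * parameters, RoCE MAC), applies pkey/GID/qkey fixups and the vport VST
 * policy, then saves the original scheduling and vlan fields in the tracked
 * QP so they can be restored on a later switch back to VGT.
 */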
3526 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3527 			     struct mlx4_vhcr *vhcr,
3528 			     struct mlx4_cmd_mailbox *inbox,
3529 			     struct mlx4_cmd_mailbox *outbox,
3530 			     struct mlx4_cmd_info *cmd)
3531 {
3532 	int err;
3533 	struct mlx4_qp_context *qpc = inbox->buf + 8;
3534 	int qpn = vhcr->in_modifier & 0x7fffff;
3535 	struct res_qp *qp;
3536 	u8 orig_sched_queue;
3537 	u8 orig_vlan_control = qpc->pri_path.vlan_control;
3538 	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3539 	u8 orig_pri_path_fl = qpc->pri_path.fl;
3540 	u8 orig_vlan_index = qpc->pri_path.vlan_index;
3541 	u8 orig_feup = qpc->pri_path.feup;
3542 
3543 	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3544 	if (err)
3545 		return err;
3546 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3547 	if (err)
3548 		return err;
3549 
3550 	if (roce_verify_mac(dev, slave, qpc, inbox))
3551 		return -EINVAL;
3552 
3553 	update_pkey_index(dev, slave, inbox);
3554 	update_gid(dev, inbox, (u8)slave);
3555 	adjust_proxy_tun_qkey(dev, vhcr, qpc);
3556 	orig_sched_queue = qpc->pri_path.sched_queue;
3557 	err = update_vport_qp_param(dev, inbox, slave, qpn);
3558 	if (err)
3559 		return err;
3560 
3561 	err = get_res(dev, slave, qpn, RES_QP, &qp);
3562 	if (err)
3563 		return err;
3564 	if (qp->com.from_state != RES_QP_HW) {
3565 		err = -EBUSY;
3566 		goto out;
3567 	}
3568 
3569 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3570 out:
3571 	/* if no error, save sched queue value passed in by VF. This is
3572 	 * essentially the QOS value provided by the VF. This will be useful
3573 	 * if we allow dynamic changes from VST back to VGT
3574 	 */
3575 	if (!err) {
3576 		qp->sched_queue = orig_sched_queue;
3577 		qp->vlan_control = orig_vlan_control;
3578 		qp->fvl_rx	=  orig_fvl_rx;
3579 		qp->pri_path_fl = orig_pri_path_fl;
3580 		qp->vlan_index  = orig_vlan_index;
3581 		qp->feup	= orig_feup;
3582 	}
3583 	put_res(dev, slave, qpn, RES_QP);
3584 	return err;
3585 }
3586 
3587 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3588 			    struct mlx4_vhcr *vhcr,
3589 			    struct mlx4_cmd_mailbox *inbox,
3590 			    struct mlx4_cmd_mailbox *outbox,
3591 			    struct mlx4_cmd_info *cmd)
3592 {
3593 	int err;
3594 	struct mlx4_qp_context *context = inbox->buf + 8;
3595 
3596 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3597 	if (err)
3598 		return err;
3599 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3600 	if (err)
3601 		return err;
3602 
3603 	update_pkey_index(dev, slave, inbox);
3604 	update_gid(dev, inbox, (u8)slave);
3605 	adjust_proxy_tun_qkey(dev, vhcr, context);
3606 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3607 }
3608 
3609 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3610 			    struct mlx4_vhcr *vhcr,
3611 			    struct mlx4_cmd_mailbox *inbox,
3612 			    struct mlx4_cmd_mailbox *outbox,
3613 			    struct mlx4_cmd_info *cmd)
3614 {
3615 	int err;
3616 	struct mlx4_qp_context *context = inbox->buf + 8;
3617 
3618 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3619 	if (err)
3620 		return err;
3621 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3622 	if (err)
3623 		return err;
3624 
3625 	update_pkey_index(dev, slave, inbox);
3626 	update_gid(dev, inbox, (u8)slave);
3627 	adjust_proxy_tun_qkey(dev, vhcr, context);
3628 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3629 }
3630 
3631 
3632 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3633 			      struct mlx4_vhcr *vhcr,
3634 			      struct mlx4_cmd_mailbox *inbox,
3635 			      struct mlx4_cmd_mailbox *outbox,
3636 			      struct mlx4_cmd_info *cmd)
3637 {
3638 	struct mlx4_qp_context *context = inbox->buf + 8;
3639 	int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3640 	if (err)
3641 		return err;
3642 	adjust_proxy_tun_qkey(dev, vhcr, context);
3643 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3644 }
3645 
3646 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3647 			    struct mlx4_vhcr *vhcr,
3648 			    struct mlx4_cmd_mailbox *inbox,
3649 			    struct mlx4_cmd_mailbox *outbox,
3650 			    struct mlx4_cmd_info *cmd)
3651 {
3652 	int err;
3653 	struct mlx4_qp_context *context = inbox->buf + 8;
3654 
3655 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3656 	if (err)
3657 		return err;
3658 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3659 	if (err)
3660 		return err;
3661 
3662 	adjust_proxy_tun_qkey(dev, vhcr, context);
3663 	update_gid(dev, inbox, (u8)slave);
3664 	update_pkey_index(dev, slave, inbox);
3665 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3666 }
3667 
3668 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3669 			    struct mlx4_vhcr *vhcr,
3670 			    struct mlx4_cmd_mailbox *inbox,
3671 			    struct mlx4_cmd_mailbox *outbox,
3672 			    struct mlx4_cmd_info *cmd)
3673 {
3674 	int err;
3675 	struct mlx4_qp_context *context = inbox->buf + 8;
3676 
3677 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3678 	if (err)
3679 		return err;
3680 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3681 	if (err)
3682 		return err;
3683 
3684 	adjust_proxy_tun_qkey(dev, vhcr, context);
3685 	update_gid(dev, inbox, (u8)slave);
3686 	update_pkey_index(dev, slave, inbox);
3687 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3688 }
3689 
3690 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3691 			 struct mlx4_vhcr *vhcr,
3692 			 struct mlx4_cmd_mailbox *inbox,
3693 			 struct mlx4_cmd_mailbox *outbox,
3694 			 struct mlx4_cmd_info *cmd)
3695 {
3696 	int err;
3697 	int qpn = vhcr->in_modifier & 0x7fffff;
3698 	struct res_qp *qp;
3699 
3700 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3701 	if (err)
3702 		return err;
3703 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3704 	if (err)
3705 		goto ex_abort;
3706 
3707 	atomic_dec(&qp->mtt->ref_count);
3708 	atomic_dec(&qp->rcq->ref_count);
3709 	atomic_dec(&qp->scq->ref_count);
3710 	if (qp->srq)
3711 		atomic_dec(&qp->srq->ref_count);
3712 	res_end_move(dev, slave, RES_QP, qpn);
3713 	return 0;
3714 
3715 ex_abort:
3716 	res_abort_move(dev, slave, RES_QP, qpn);
3717 
3718 	return err;
3719 }
3720 
3721 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3722 				struct res_qp *rqp, u8 *gid)
3723 {
3724 	struct res_gid *res;
3725 
3726 	list_for_each_entry(res, &rqp->mcg_list, list) {
3727 		if (!memcmp(res->gid, gid, 16))
3728 			return res;
3729 	}
3730 	return NULL;
3731 }
3732 
3733 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3734 		       u8 *gid, enum mlx4_protocol prot,
3735 		       enum mlx4_steer_type steer, u64 reg_id)
3736 {
3737 	struct res_gid *res;
3738 	int err;
3739 
3740 	res = kzalloc(sizeof *res, GFP_KERNEL);
3741 	if (!res)
3742 		return -ENOMEM;
3743 
3744 	spin_lock_irq(&rqp->mcg_spl);
3745 	if (find_gid(dev, slave, rqp, gid)) {
3746 		kfree(res);
3747 		err = -EEXIST;
3748 	} else {
3749 		memcpy(res->gid, gid, 16);
3750 		res->prot = prot;
3751 		res->steer = steer;
3752 		res->reg_id = reg_id;
3753 		list_add_tail(&res->list, &rqp->mcg_list);
3754 		err = 0;
3755 	}
3756 	spin_unlock_irq(&rqp->mcg_spl);
3757 
3758 	return err;
3759 }
3760 
3761 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3762 		       u8 *gid, enum mlx4_protocol prot,
3763 		       enum mlx4_steer_type steer, u64 *reg_id)
3764 {
3765 	struct res_gid *res;
3766 	int err;
3767 
3768 	spin_lock_irq(&rqp->mcg_spl);
3769 	res = find_gid(dev, slave, rqp, gid);
3770 	if (!res || res->prot != prot || res->steer != steer)
3771 		err = -EINVAL;
3772 	else {
3773 		*reg_id = res->reg_id;
3774 		list_del(&res->list);
3775 		kfree(res);
3776 		err = 0;
3777 	}
3778 	spin_unlock_irq(&rqp->mcg_spl);
3779 
3780 	return err;
3781 }
3782 
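/*
 * qp_attach()/qp_detach() below dispatch on the device steering mode:
 * device-managed flow steering uses mlx4_trans_to_dmfs_attach() and
 * mlx4_flow_detach(), while B0 steering uses the common
 * mlx4_qp_attach_common()/mlx4_qp_detach_common() path.  For Ethernet the
 * port number the slave placed in gid[5] is first translated to the real
 * physical port.
 */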
3783 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3784 		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3785 		     enum mlx4_steer_type type, u64 *reg_id)
3786 {
3787 	switch (dev->caps.steering_mode) {
3788 	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3789 		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3790 		if (port < 0)
3791 			return port;
3792 		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3793 						block_loopback, prot,
3794 						reg_id);
3795 	}
3796 	case MLX4_STEERING_MODE_B0:
3797 		if (prot == MLX4_PROT_ETH) {
3798 			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3799 			if (port < 0)
3800 				return port;
3801 			gid[5] = port;
3802 		}
3803 		return mlx4_qp_attach_common(dev, qp, gid,
3804 					    block_loopback, prot, type);
3805 	default:
3806 		return -EINVAL;
3807 	}
3808 }
3809 
3810 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3811 		     u8 gid[16], enum mlx4_protocol prot,
3812 		     enum mlx4_steer_type type, u64 reg_id)
3813 {
3814 	switch (dev->caps.steering_mode) {
3815 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
3816 		return mlx4_flow_detach(dev, reg_id);
3817 	case MLX4_STEERING_MODE_B0:
3818 		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3819 	default:
3820 		return -EINVAL;
3821 	}
3822 }
3823 
3824 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3825 			    u8 *gid, enum mlx4_protocol prot)
3826 {
3827 	int real_port;
3828 
3829 	if (prot != MLX4_PROT_ETH)
3830 		return 0;
3831 
3832 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3833 	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3834 		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3835 		if (real_port < 0)
3836 			return -EINVAL;
3837 		gid[5] = real_port;
3838 	}
3839 
3840 	return 0;
3841 }
3842 
3843 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3844 			       struct mlx4_vhcr *vhcr,
3845 			       struct mlx4_cmd_mailbox *inbox,
3846 			       struct mlx4_cmd_mailbox *outbox,
3847 			       struct mlx4_cmd_info *cmd)
3848 {
3849 	struct mlx4_qp qp; /* dummy for calling attach/detach */
3850 	u8 *gid = inbox->buf;
3851 	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3852 	int err;
3853 	int qpn;
3854 	struct res_qp *rqp;
3855 	u64 reg_id = 0;
3856 	int attach = vhcr->op_modifier;
3857 	int block_loopback = vhcr->in_modifier >> 31;
3858 	u8 steer_type_mask = 2;
3859 	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3860 
3861 	qpn = vhcr->in_modifier & 0xffffff;
3862 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
3863 	if (err)
3864 		return err;
3865 
3866 	qp.qpn = qpn;
3867 	if (attach) {
3868 		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3869 				type, &reg_id);
3870 		if (err) {
3871 			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
3872 			goto ex_put;
3873 		}
3874 		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3875 		if (err)
3876 			goto ex_detach;
3877 	} else {
3878 		err = mlx4_adjust_port(dev, slave, gid, prot);
3879 		if (err)
3880 			goto ex_put;
3881 
3882 		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3883 		if (err)
3884 			goto ex_put;
3885 
3886 		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3887 		if (err)
3888 			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3889 			       qpn, reg_id);
3890 	}
3891 	put_res(dev, slave, qpn, RES_QP);
3892 	return err;
3893 
3894 ex_detach:
3895 	qp_detach(dev, &qp, gid, prot, type, reg_id);
3896 ex_put:
3897 	put_res(dev, slave, qpn, RES_QP);
3898 	return err;
3899 }
3900 
3901 /*
3902  * MAC validation for Flow Steering rules.
3903  * A VF can attach rules only with a MAC address that is assigned to it.
3904  */
3905 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3906 				   struct list_head *rlist)
3907 {
3908 	struct mac_res *res, *tmp;
3909 	__be64 be_mac;
3910 
3911 	/* make sure it isn't a multicast or broadcast MAC */
3912 	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3913 	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3914 		list_for_each_entry_safe(res, tmp, rlist, list) {
3915 			be_mac = cpu_to_be64(res->mac << 16);
3916 			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3917 				return 0;
3918 		}
3919 		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3920 		       eth_header->eth.dst_mac, slave);
3921 		return -EINVAL;
3922 	}
3923 	return 0;
3924 }
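
/*
 * Illustrative helper, not part of the original driver (the name
 * mac_u64_matches is hypothetical): validate_eth_header_mac() above compares
 * a MAC tracked as a u64 (the low 48 bits of res->mac) against the 6-byte
 * dst_mac of the rule.  Shifting left by 16 and converting to big endian
 * places the six address bytes at the start of the buffer, so
 * ether_addr_equal() can compare them directly.
 */
static inline bool mac_u64_matches(u64 mac, const u8 *dst_mac)
{
	__be64 be_mac = cpu_to_be64(mac << 16);

	return ether_addr_equal((const u8 *)&be_mac, dst_mac);
}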
3925 
3926 /*
3927  * If the eth header is missing, append an eth header with a MAC address
3928  * assigned to the VF.
3929  */
3930 static int add_eth_header(struct mlx4_dev *dev, int slave,
3931 			  struct mlx4_cmd_mailbox *inbox,
3932 			  struct list_head *rlist, int header_id)
3933 {
3934 	struct mac_res *res, *tmp;
3935 	u8 port;
3936 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3937 	struct mlx4_net_trans_rule_hw_eth *eth_header;
3938 	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3939 	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3940 	__be64 be_mac = 0;
3941 	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3942 
3943 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3944 	port = ctrl->port;
3945 	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3946 
3947 	/* Clear a space in the inbox for eth header */
3948 	switch (header_id) {
3949 	case MLX4_NET_TRANS_RULE_ID_IPV4:
3950 		ip_header =
3951 			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3952 		memmove(ip_header, eth_header,
3953 			sizeof(*ip_header) + sizeof(*l4_header));
3954 		break;
3955 	case MLX4_NET_TRANS_RULE_ID_TCP:
3956 	case MLX4_NET_TRANS_RULE_ID_UDP:
3957 		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3958 			    (eth_header + 1);
3959 		memmove(l4_header, eth_header, sizeof(*l4_header));
3960 		break;
3961 	default:
3962 		return -EINVAL;
3963 	}
3964 	list_for_each_entry_safe(res, tmp, rlist, list) {
3965 		if (port == res->port) {
3966 			be_mac = cpu_to_be64(res->mac << 16);
3967 			break;
3968 		}
3969 	}
3970 	if (!be_mac) {
3971 		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
3972 		       port);
3973 		return -EINVAL;
3974 	}
3975 
3976 	memset(eth_header, 0, sizeof(*eth_header));
3977 	eth_header->size = sizeof(*eth_header) >> 2;
3978 	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3979 	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3980 	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3981 
3982 	return 0;
3983 
3984 }
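
/*
 * Note on the layout handled above: the rule mailbox starts with a
 * mlx4_net_trans_rule_hw_ctrl header followed by the per-protocol rule
 * headers, and header sizes are expressed in 32-bit dwords; this is why
 * eth_header->size above, and the in_modifier adjustment in
 * mlx4_QP_FLOW_STEERING_ATTACH_wrapper() below, use "sizeof(...) >> 2".
 */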
3985 
3986 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
3987 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
3988 			   struct mlx4_vhcr *vhcr,
3989 			   struct mlx4_cmd_mailbox *inbox,
3990 			   struct mlx4_cmd_mailbox *outbox,
3991 			   struct mlx4_cmd_info *cmd_info)
3992 {
3993 	int err;
3994 	u32 qpn = vhcr->in_modifier & 0xffffff;
3995 	struct res_qp *rqp;
3996 	u64 mac;
3997 	unsigned port;
3998 	u64 pri_addr_path_mask;
3999 	struct mlx4_update_qp_context *cmd;
4000 	int smac_index;
4001 
4002 	cmd = (struct mlx4_update_qp_context *)inbox->buf;
4003 
4004 	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4005 	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4006 	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4007 		return -EPERM;
4008 
4009 	/* Just change the smac for the QP */
4010 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4011 	if (err) {
4012 		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4013 		return err;
4014 	}
4015 
4016 	port = (rqp->sched_queue >> 6 & 1) + 1;
4017 
4018 	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4019 		smac_index = cmd->qp_context.pri_path.grh_mylmc;
4020 		err = mac_find_smac_ix_in_slave(dev, slave, port,
4021 						smac_index, &mac);
4022 
4023 		if (err) {
4024 			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4025 				 qpn, smac_index);
4026 			goto err_mac;
4027 		}
4028 	}
4029 
4030 	err = mlx4_cmd(dev, inbox->dma,
4031 		       vhcr->in_modifier, 0,
4032 		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4033 		       MLX4_CMD_NATIVE);
4034 	if (err) {
4035 		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4036 		goto err_mac;
4037 	}
4038 
4039 err_mac:
4040 	put_res(dev, slave, qpn, RES_QP);
4041 	return err;
4042 }
4043 
4044 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4045 					 struct mlx4_vhcr *vhcr,
4046 					 struct mlx4_cmd_mailbox *inbox,
4047 					 struct mlx4_cmd_mailbox *outbox,
4048 					 struct mlx4_cmd_info *cmd)
4049 {
4050 
4051 	struct mlx4_priv *priv = mlx4_priv(dev);
4052 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4053 	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4054 	int err;
4055 	int qpn;
4056 	struct res_qp *rqp;
4057 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4058 	struct _rule_hw  *rule_header;
4059 	int header_id;
4060 
4061 	if (dev->caps.steering_mode !=
4062 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
4063 		return -EOPNOTSUPP;
4064 
4065 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4066 	ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4067 	if (ctrl->port <= 0)
4068 		return -EINVAL;
4069 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4070 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4071 	if (err) {
4072 		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4073 		return err;
4074 	}
4075 	rule_header = (struct _rule_hw *)(ctrl + 1);
4076 	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4077 
4078 	switch (header_id) {
4079 	case MLX4_NET_TRANS_RULE_ID_ETH:
4080 		if (validate_eth_header_mac(slave, rule_header, rlist)) {
4081 			err = -EINVAL;
4082 			goto err_put;
4083 		}
4084 		break;
4085 	case MLX4_NET_TRANS_RULE_ID_IB:
4086 		break;
4087 	case MLX4_NET_TRANS_RULE_ID_IPV4:
4088 	case MLX4_NET_TRANS_RULE_ID_TCP:
4089 	case MLX4_NET_TRANS_RULE_ID_UDP:
4090 		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4091 		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4092 			err = -EINVAL;
4093 			goto err_put;
4094 		}
4095 		vhcr->in_modifier +=
4096 			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4097 		break;
4098 	default:
4099 		pr_err("Corrupted mailbox\n");
4100 		err = -EINVAL;
4101 		goto err_put;
4102 	}
4103 
4104 	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4105 			   vhcr->in_modifier, 0,
4106 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4107 			   MLX4_CMD_NATIVE);
4108 	if (err)
4109 		goto err_put;
4110 
4111 	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4112 	if (err) {
4113 		mlx4_err(dev, "Fail to add flow steering resources\n");
4114 		/* detach rule */
4115 		mlx4_cmd(dev, vhcr->out_param, 0, 0,
4116 			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4117 			 MLX4_CMD_NATIVE);
4118 		goto err_put;
4119 	}
4120 	atomic_inc(&rqp->ref_count);
4121 err_put:
4122 	put_res(dev, slave, qpn, RES_QP);
4123 	return err;
4124 }
4125 
4126 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4127 					 struct mlx4_vhcr *vhcr,
4128 					 struct mlx4_cmd_mailbox *inbox,
4129 					 struct mlx4_cmd_mailbox *outbox,
4130 					 struct mlx4_cmd_info *cmd)
4131 {
4132 	int err;
4133 	struct res_qp *rqp;
4134 	struct res_fs_rule *rrule;
4135 
4136 	if (dev->caps.steering_mode !=
4137 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
4138 		return -EOPNOTSUPP;
4139 
4140 	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4141 	if (err)
4142 		return err;
4143 	/* Release the rule from busy state before removal */
4144 	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4145 	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4146 	if (err)
4147 		return err;
4148 
4149 	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4150 	if (err) {
4151 		mlx4_err(dev, "Fail to remove flow steering resources\n");
4152 		goto out;
4153 	}
4154 
4155 	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4156 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4157 		       MLX4_CMD_NATIVE);
4158 	if (!err)
4159 		atomic_dec(&rqp->ref_count);
4160 out:
4161 	put_res(dev, slave, rrule->qpn, RES_QP);
4162 	return err;
4163 }
4164 
4165 enum {
4166 	BUSY_MAX_RETRIES = 10
4167 };
4168 
4169 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4170 			       struct mlx4_vhcr *vhcr,
4171 			       struct mlx4_cmd_mailbox *inbox,
4172 			       struct mlx4_cmd_mailbox *outbox,
4173 			       struct mlx4_cmd_info *cmd)
4174 {
4175 	int err;
4176 	int index = vhcr->in_modifier & 0xffff;
4177 
4178 	err = get_res(dev, slave, index, RES_COUNTER, NULL);
4179 	if (err)
4180 		return err;
4181 
4182 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4183 	put_res(dev, slave, index, RES_COUNTER);
4184 	return err;
4185 }
4186 
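/*
 * detach_qp() is used during slave cleanup: it walks the multicast group
 * list recorded on the res_qp and undoes every attachment, either by
 * detaching the flow-steering rule (device-managed mode) or through the
 * common QP detach path (B0 mode), freeing each res_gid entry as it goes.
 */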
4187 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4188 {
4189 	struct res_gid *rgid;
4190 	struct res_gid *tmp;
4191 	struct mlx4_qp qp; /* dummy for calling attach/detach */
4192 
4193 	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4194 		switch (dev->caps.steering_mode) {
4195 		case MLX4_STEERING_MODE_DEVICE_MANAGED:
4196 			mlx4_flow_detach(dev, rgid->reg_id);
4197 			break;
4198 		case MLX4_STEERING_MODE_B0:
4199 			qp.qpn = rqp->local_qpn;
4200 			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4201 						     rgid->prot, rgid->steer);
4202 			break;
4203 		}
4204 		list_del(&rgid->list);
4205 		kfree(rgid);
4206 	}
4207 }
4208 
4209 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4210 			  enum mlx4_resource type, int print)
4211 {
4212 	struct mlx4_priv *priv = mlx4_priv(dev);
4213 	struct mlx4_resource_tracker *tracker =
4214 		&priv->mfunc.master.res_tracker;
4215 	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4216 	struct res_common *r;
4217 	struct res_common *tmp;
4218 	int busy;
4219 
4220 	busy = 0;
4221 	spin_lock_irq(mlx4_tlock(dev));
4222 	list_for_each_entry_safe(r, tmp, rlist, list) {
4223 		if (r->owner == slave) {
4224 			if (!r->removing) {
4225 				if (r->state == RES_ANY_BUSY) {
4226 					if (print)
4227 						mlx4_dbg(dev,
4228 							 "%s id 0x%llx is busy\n",
4229 							  resource_str(type),
4230 							  r->res_id);
4231 					++busy;
4232 				} else {
4233 					r->from_state = r->state;
4234 					r->state = RES_ANY_BUSY;
4235 					r->removing = 1;
4236 				}
4237 			}
4238 		}
4239 	}
4240 	spin_unlock_irq(mlx4_tlock(dev));
4241 
4242 	return busy;
4243 }
4244 
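/*
 * move_all_busy() retries _move_all_busy(), which marks every resource of
 * the given type owned by the slave as busy/removing and reports how many
 * were already busy, for up to five seconds (5 * HZ) with cond_resched()
 * in between.  A final pass with printing enabled logs any resources that
 * are still busy.
 */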
4245 static int move_all_busy(struct mlx4_dev *dev, int slave,
4246 			 enum mlx4_resource type)
4247 {
4248 	unsigned long begin;
4249 	int busy;
4250 
4251 	begin = jiffies;
4252 	do {
4253 		busy = _move_all_busy(dev, slave, type, 0);
4254 		if (time_after(jiffies, begin + 5 * HZ))
4255 			break;
4256 		if (busy)
4257 			cond_resched();
4258 	} while (busy);
4259 
4260 	if (busy)
4261 		busy = _move_all_busy(dev, slave, type, 1);
4262 
4263 	return busy;
4264 }
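
/*
 * The rem_slave_*() functions below share one pattern: mark the slave's
 * resources busy via move_all_busy(), then walk the per-slave list and step
 * each resource back down its state ladder (for QPs: RES_QP_HW ->
 * RES_QP_MAPPED -> RES_QP_RESERVED -> freed), issuing the matching 2RST or
 * HW2SW firmware command and releasing ICM and reference counts on the way.
 */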
4265 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4266 {
4267 	struct mlx4_priv *priv = mlx4_priv(dev);
4268 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4269 	struct list_head *qp_list =
4270 		&tracker->slave_list[slave].res_list[RES_QP];
4271 	struct res_qp *qp;
4272 	struct res_qp *tmp;
4273 	int state;
4274 	u64 in_param;
4275 	int qpn;
4276 	int err;
4277 
4278 	err = move_all_busy(dev, slave, RES_QP);
4279 	if (err)
4280 		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4281 			  slave);
4282 
4283 	spin_lock_irq(mlx4_tlock(dev));
4284 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4285 		spin_unlock_irq(mlx4_tlock(dev));
4286 		if (qp->com.owner == slave) {
4287 			qpn = qp->com.res_id;
4288 			detach_qp(dev, slave, qp);
4289 			state = qp->com.from_state;
4290 			while (state != 0) {
4291 				switch (state) {
4292 				case RES_QP_RESERVED:
4293 					spin_lock_irq(mlx4_tlock(dev));
4294 					rb_erase(&qp->com.node,
4295 						 &tracker->res_tree[RES_QP]);
4296 					list_del(&qp->com.list);
4297 					spin_unlock_irq(mlx4_tlock(dev));
4298 					if (!valid_reserved(dev, slave, qpn)) {
4299 						__mlx4_qp_release_range(dev, qpn, 1);
4300 						mlx4_release_resource(dev, slave,
4301 								      RES_QP, 1, 0);
4302 					}
4303 					kfree(qp);
4304 					state = 0;
4305 					break;
4306 				case RES_QP_MAPPED:
4307 					if (!valid_reserved(dev, slave, qpn))
4308 						__mlx4_qp_free_icm(dev, qpn);
4309 					state = RES_QP_RESERVED;
4310 					break;
4311 				case RES_QP_HW:
4312 					in_param = slave;
4313 					err = mlx4_cmd(dev, in_param,
4314 						       qp->local_qpn, 2,
4315 						       MLX4_CMD_2RST_QP,
4316 						       MLX4_CMD_TIME_CLASS_A,
4317 						       MLX4_CMD_NATIVE);
4318 					if (err)
4319 						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4320 							 slave, qp->local_qpn);
4321 					atomic_dec(&qp->rcq->ref_count);
4322 					atomic_dec(&qp->scq->ref_count);
4323 					atomic_dec(&qp->mtt->ref_count);
4324 					if (qp->srq)
4325 						atomic_dec(&qp->srq->ref_count);
4326 					state = RES_QP_MAPPED;
4327 					break;
4328 				default:
4329 					state = 0;
4330 				}
4331 			}
4332 		}
4333 		spin_lock_irq(mlx4_tlock(dev));
4334 	}
4335 	spin_unlock_irq(mlx4_tlock(dev));
4336 }
4337 
4338 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4339 {
4340 	struct mlx4_priv *priv = mlx4_priv(dev);
4341 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4342 	struct list_head *srq_list =
4343 		&tracker->slave_list[slave].res_list[RES_SRQ];
4344 	struct res_srq *srq;
4345 	struct res_srq *tmp;
4346 	int state;
4347 	u64 in_param;
4348 	LIST_HEAD(tlist);
4349 	int srqn;
4350 	int err;
4351 
4352 	err = move_all_busy(dev, slave, RES_SRQ);
4353 	if (err)
4354 		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4355 			  slave);
4356 
4357 	spin_lock_irq(mlx4_tlock(dev));
4358 	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4359 		spin_unlock_irq(mlx4_tlock(dev));
4360 		if (srq->com.owner == slave) {
4361 			srqn = srq->com.res_id;
4362 			state = srq->com.from_state;
4363 			while (state != 0) {
4364 				switch (state) {
4365 				case RES_SRQ_ALLOCATED:
4366 					__mlx4_srq_free_icm(dev, srqn);
4367 					spin_lock_irq(mlx4_tlock(dev));
4368 					rb_erase(&srq->com.node,
4369 						 &tracker->res_tree[RES_SRQ]);
4370 					list_del(&srq->com.list);
4371 					spin_unlock_irq(mlx4_tlock(dev));
4372 					mlx4_release_resource(dev, slave,
4373 							      RES_SRQ, 1, 0);
4374 					kfree(srq);
4375 					state = 0;
4376 					break;
4377 
4378 				case RES_SRQ_HW:
4379 					in_param = slave;
4380 					err = mlx4_cmd(dev, in_param, srqn, 1,
4381 						       MLX4_CMD_HW2SW_SRQ,
4382 						       MLX4_CMD_TIME_CLASS_A,
4383 						       MLX4_CMD_NATIVE);
4384 					if (err)
4385 						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4386 							 slave, srqn);
4387 
4388 					atomic_dec(&srq->mtt->ref_count);
4389 					if (srq->cq)
4390 						atomic_dec(&srq->cq->ref_count);
4391 					state = RES_SRQ_ALLOCATED;
4392 					break;
4393 
4394 				default:
4395 					state = 0;
4396 				}
4397 			}
4398 		}
4399 		spin_lock_irq(mlx4_tlock(dev));
4400 	}
4401 	spin_unlock_irq(mlx4_tlock(dev));
4402 }
4403 
4404 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4405 {
4406 	struct mlx4_priv *priv = mlx4_priv(dev);
4407 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4408 	struct list_head *cq_list =
4409 		&tracker->slave_list[slave].res_list[RES_CQ];
4410 	struct res_cq *cq;
4411 	struct res_cq *tmp;
4412 	int state;
4413 	u64 in_param;
4414 	LIST_HEAD(tlist);
4415 	int cqn;
4416 	int err;
4417 
4418 	err = move_all_busy(dev, slave, RES_CQ);
4419 	if (err)
4420 		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4421 			  slave);
4422 
4423 	spin_lock_irq(mlx4_tlock(dev));
4424 	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4425 		spin_unlock_irq(mlx4_tlock(dev));
4426 		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4427 			cqn = cq->com.res_id;
4428 			state = cq->com.from_state;
4429 			while (state != 0) {
4430 				switch (state) {
4431 				case RES_CQ_ALLOCATED:
4432 					__mlx4_cq_free_icm(dev, cqn);
4433 					spin_lock_irq(mlx4_tlock(dev));
4434 					rb_erase(&cq->com.node,
4435 						 &tracker->res_tree[RES_CQ]);
4436 					list_del(&cq->com.list);
4437 					spin_unlock_irq(mlx4_tlock(dev));
4438 					mlx4_release_resource(dev, slave,
4439 							      RES_CQ, 1, 0);
4440 					kfree(cq);
4441 					state = 0;
4442 					break;
4443 
4444 				case RES_CQ_HW:
4445 					in_param = slave;
4446 					err = mlx4_cmd(dev, in_param, cqn, 1,
4447 						       MLX4_CMD_HW2SW_CQ,
4448 						       MLX4_CMD_TIME_CLASS_A,
4449 						       MLX4_CMD_NATIVE);
4450 					if (err)
4451 						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4452 							 slave, cqn);
4453 					atomic_dec(&cq->mtt->ref_count);
4454 					state = RES_CQ_ALLOCATED;
4455 					break;
4456 
4457 				default:
4458 					state = 0;
4459 				}
4460 			}
4461 		}
4462 		spin_lock_irq(mlx4_tlock(dev));
4463 	}
4464 	spin_unlock_irq(mlx4_tlock(dev));
4465 }
4466 
4467 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4468 {
4469 	struct mlx4_priv *priv = mlx4_priv(dev);
4470 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4471 	struct list_head *mpt_list =
4472 		&tracker->slave_list[slave].res_list[RES_MPT];
4473 	struct res_mpt *mpt;
4474 	struct res_mpt *tmp;
4475 	int state;
4476 	u64 in_param;
4477 	LIST_HEAD(tlist);
4478 	int mptn;
4479 	int err;
4480 
4481 	err = move_all_busy(dev, slave, RES_MPT);
4482 	if (err)
4483 		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4484 			  slave);
4485 
4486 	spin_lock_irq(mlx4_tlock(dev));
4487 	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4488 		spin_unlock_irq(mlx4_tlock(dev));
4489 		if (mpt->com.owner == slave) {
4490 			mptn = mpt->com.res_id;
4491 			state = mpt->com.from_state;
4492 			while (state != 0) {
4493 				switch (state) {
4494 				case RES_MPT_RESERVED:
4495 					__mlx4_mpt_release(dev, mpt->key);
4496 					spin_lock_irq(mlx4_tlock(dev));
4497 					rb_erase(&mpt->com.node,
4498 						 &tracker->res_tree[RES_MPT]);
4499 					list_del(&mpt->com.list);
4500 					spin_unlock_irq(mlx4_tlock(dev));
4501 					mlx4_release_resource(dev, slave,
4502 							      RES_MPT, 1, 0);
4503 					kfree(mpt);
4504 					state = 0;
4505 					break;
4506 
4507 				case RES_MPT_MAPPED:
4508 					__mlx4_mpt_free_icm(dev, mpt->key);
4509 					state = RES_MPT_RESERVED;
4510 					break;
4511 
4512 				case RES_MPT_HW:
4513 					in_param = slave;
4514 					err = mlx4_cmd(dev, in_param, mptn, 0,
4515 						     MLX4_CMD_HW2SW_MPT,
4516 						     MLX4_CMD_TIME_CLASS_A,
4517 						     MLX4_CMD_NATIVE);
4518 					if (err)
4519 						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4520 							 slave, mptn);
4521 					if (mpt->mtt)
4522 						atomic_dec(&mpt->mtt->ref_count);
4523 					state = RES_MPT_MAPPED;
4524 					break;
4525 				default:
4526 					state = 0;
4527 				}
4528 			}
4529 		}
4530 		spin_lock_irq(mlx4_tlock(dev));
4531 	}
4532 	spin_unlock_irq(mlx4_tlock(dev));
4533 }
4534 
4535 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4536 {
4537 	struct mlx4_priv *priv = mlx4_priv(dev);
4538 	struct mlx4_resource_tracker *tracker =
4539 		&priv->mfunc.master.res_tracker;
4540 	struct list_head *mtt_list =
4541 		&tracker->slave_list[slave].res_list[RES_MTT];
4542 	struct res_mtt *mtt;
4543 	struct res_mtt *tmp;
4544 	int state;
4545 	LIST_HEAD(tlist);
4546 	int base;
4547 	int err;
4548 
4549 	err = move_all_busy(dev, slave, RES_MTT);
4550 	if (err)
4551 		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts  - too busy for slave %d\n",
4552 			  slave);
4553 
4554 	spin_lock_irq(mlx4_tlock(dev));
4555 	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4556 		spin_unlock_irq(mlx4_tlock(dev));
4557 		if (mtt->com.owner == slave) {
4558 			base = mtt->com.res_id;
4559 			state = mtt->com.from_state;
4560 			while (state != 0) {
4561 				switch (state) {
4562 				case RES_MTT_ALLOCATED:
4563 					__mlx4_free_mtt_range(dev, base,
4564 							      mtt->order);
4565 					spin_lock_irq(mlx4_tlock(dev));
4566 					rb_erase(&mtt->com.node,
4567 						 &tracker->res_tree[RES_MTT]);
4568 					list_del(&mtt->com.list);
4569 					spin_unlock_irq(mlx4_tlock(dev));
4570 					mlx4_release_resource(dev, slave, RES_MTT,
4571 							      1 << mtt->order, 0);
4572 					kfree(mtt);
4573 					state = 0;
4574 					break;
4575 
4576 				default:
4577 					state = 0;
4578 				}
4579 			}
4580 		}
4581 		spin_lock_irq(mlx4_tlock(dev));
4582 	}
4583 	spin_unlock_irq(mlx4_tlock(dev));
4584 }
4585 
4586 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4587 {
4588 	struct mlx4_priv *priv = mlx4_priv(dev);
4589 	struct mlx4_resource_tracker *tracker =
4590 		&priv->mfunc.master.res_tracker;
4591 	struct list_head *fs_rule_list =
4592 		&tracker->slave_list[slave].res_list[RES_FS_RULE];
4593 	struct res_fs_rule *fs_rule;
4594 	struct res_fs_rule *tmp;
4595 	int state;
4596 	u64 base;
4597 	int err;
4598 
4599 	err = move_all_busy(dev, slave, RES_FS_RULE);
4600 	if (err)
4601 		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
4602 			  slave);
4603 
4604 	spin_lock_irq(mlx4_tlock(dev));
4605 	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4606 		spin_unlock_irq(mlx4_tlock(dev));
4607 		if (fs_rule->com.owner == slave) {
4608 			base = fs_rule->com.res_id;
4609 			state = fs_rule->com.from_state;
4610 			while (state != 0) {
4611 				switch (state) {
4612 				case RES_FS_RULE_ALLOCATED:
4613 					/* detach rule */
4614 					err = mlx4_cmd(dev, base, 0, 0,
4615 						       MLX4_QP_FLOW_STEERING_DETACH,
4616 						       MLX4_CMD_TIME_CLASS_A,
4617 						       MLX4_CMD_NATIVE);
4618 
4619 					spin_lock_irq(mlx4_tlock(dev));
4620 					rb_erase(&fs_rule->com.node,
4621 						 &tracker->res_tree[RES_FS_RULE]);
4622 					list_del(&fs_rule->com.list);
4623 					spin_unlock_irq(mlx4_tlock(dev));
4624 					kfree(fs_rule);
4625 					state = 0;
4626 					break;
4627 
4628 				default:
4629 					state = 0;
4630 				}
4631 			}
4632 		}
4633 		spin_lock_irq(mlx4_tlock(dev));
4634 	}
4635 	spin_unlock_irq(mlx4_tlock(dev));
4636 }
4637 
4638 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4639 {
4640 	struct mlx4_priv *priv = mlx4_priv(dev);
4641 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4642 	struct list_head *eq_list =
4643 		&tracker->slave_list[slave].res_list[RES_EQ];
4644 	struct res_eq *eq;
4645 	struct res_eq *tmp;
4646 	int err;
4647 	int state;
4648 	LIST_HEAD(tlist);
4649 	int eqn;
4650 	struct mlx4_cmd_mailbox *mailbox;
4651 
4652 	err = move_all_busy(dev, slave, RES_EQ);
4653 	if (err)
4654 		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4655 			  slave);
4656 
4657 	spin_lock_irq(mlx4_tlock(dev));
4658 	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4659 		spin_unlock_irq(mlx4_tlock(dev));
4660 		if (eq->com.owner == slave) {
4661 			eqn = eq->com.res_id;
4662 			state = eq->com.from_state;
4663 			while (state != 0) {
4664 				switch (state) {
4665 				case RES_EQ_RESERVED:
4666 					spin_lock_irq(mlx4_tlock(dev));
4667 					rb_erase(&eq->com.node,
4668 						 &tracker->res_tree[RES_EQ]);
4669 					list_del(&eq->com.list);
4670 					spin_unlock_irq(mlx4_tlock(dev));
4671 					kfree(eq);
4672 					state = 0;
4673 					break;
4674 
4675 				case RES_EQ_HW:
4676 					mailbox = mlx4_alloc_cmd_mailbox(dev);
4677 					if (IS_ERR(mailbox)) {
4678 						cond_resched();
4679 						continue;
4680 					}
4681 					err = mlx4_cmd_box(dev, slave, 0,
4682 							   eqn & 0xff, 0,
4683 							   MLX4_CMD_HW2SW_EQ,
4684 							   MLX4_CMD_TIME_CLASS_A,
4685 							   MLX4_CMD_NATIVE);
4686 					if (err)
4687 						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4688 							 slave, eqn);
4689 					mlx4_free_cmd_mailbox(dev, mailbox);
4690 					atomic_dec(&eq->mtt->ref_count);
4691 					state = RES_EQ_RESERVED;
4692 					break;
4693 
4694 				default:
4695 					state = 0;
4696 				}
4697 			}
4698 		}
4699 		spin_lock_irq(mlx4_tlock(dev));
4700 	}
4701 	spin_unlock_irq(mlx4_tlock(dev));
4702 }
4703 
4704 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4705 {
4706 	struct mlx4_priv *priv = mlx4_priv(dev);
4707 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4708 	struct list_head *counter_list =
4709 		&tracker->slave_list[slave].res_list[RES_COUNTER];
4710 	struct res_counter *counter;
4711 	struct res_counter *tmp;
4712 	int err;
4713 	int index;
4714 
4715 	err = move_all_busy(dev, slave, RES_COUNTER);
4716 	if (err)
4717 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4718 			  slave);
4719 
4720 	spin_lock_irq(mlx4_tlock(dev));
4721 	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4722 		if (counter->com.owner == slave) {
4723 			index = counter->com.res_id;
4724 			rb_erase(&counter->com.node,
4725 				 &tracker->res_tree[RES_COUNTER]);
4726 			list_del(&counter->com.list);
4727 			kfree(counter);
4728 			__mlx4_counter_free(dev, index);
4729 			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4730 		}
4731 	}
4732 	spin_unlock_irq(mlx4_tlock(dev));
4733 }
4734 
4735 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4736 {
4737 	struct mlx4_priv *priv = mlx4_priv(dev);
4738 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4739 	struct list_head *xrcdn_list =
4740 		&tracker->slave_list[slave].res_list[RES_XRCD];
4741 	struct res_xrcdn *xrcd;
4742 	struct res_xrcdn *tmp;
4743 	int err;
4744 	int xrcdn;
4745 
4746 	err = move_all_busy(dev, slave, RES_XRCD);
4747 	if (err)
4748 		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4749 			  slave);
4750 
4751 	spin_lock_irq(mlx4_tlock(dev));
4752 	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4753 		if (xrcd->com.owner == slave) {
4754 			xrcdn = xrcd->com.res_id;
4755 			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4756 			list_del(&xrcd->com.list);
4757 			kfree(xrcd);
4758 			__mlx4_xrcd_free(dev, xrcdn);
4759 		}
4760 	}
4761 	spin_unlock_irq(mlx4_tlock(dev));
4762 }
4763 
4764 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4765 {
4766 	struct mlx4_priv *priv = mlx4_priv(dev);
4767 	mlx4_reset_roce_gids(dev, slave);
4768 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4769 	rem_slave_vlans(dev, slave);
4770 	rem_slave_macs(dev, slave);
4771 	rem_slave_fs_rule(dev, slave);
4772 	rem_slave_qps(dev, slave);
4773 	rem_slave_srqs(dev, slave);
4774 	rem_slave_cqs(dev, slave);
4775 	rem_slave_mrs(dev, slave);
4776 	rem_slave_eqs(dev, slave);
4777 	rem_slave_mtts(dev, slave);
4778 	rem_slave_counters(dev, slave);
4779 	rem_slave_xrcdns(dev, slave);
4780 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4781 }
4782 
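/*
 * Work handler for immediate VST/VGT changes on a VF: it builds an UPDATE_QP
 * context and applies it to every RES_QP_HW QP owned by the slave on the
 * affected port.  For MLX4_VGT the fields saved at INIT2RTR time
 * (sched_queue, vlan_control, fvl_rx, fl, feup, vlan_index, param3) are
 * restored; otherwise the new VLAN index, QoS bits and VLAN-control policy
 * are forced, and the previous VLAN is unregistered once all QPs have been
 * updated without error.
 */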
4783 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4784 {
4785 	struct mlx4_vf_immed_vlan_work *work =
4786 		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4787 	struct mlx4_cmd_mailbox *mailbox;
4788 	struct mlx4_update_qp_context *upd_context;
4789 	struct mlx4_dev *dev = &work->priv->dev;
4790 	struct mlx4_resource_tracker *tracker =
4791 		&work->priv->mfunc.master.res_tracker;
4792 	struct list_head *qp_list =
4793 		&tracker->slave_list[work->slave].res_list[RES_QP];
4794 	struct res_qp *qp;
4795 	struct res_qp *tmp;
4796 	u64 qp_path_mask_vlan_ctrl =
4797 		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4798 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4799 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4800 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4801 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4802 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4803 
4804 	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4805 		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4806 		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4807 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4808 		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4809 		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4810 		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4811 
4812 	int err;
4813 	int port, errors = 0;
4814 	u8 vlan_control;
4815 
4816 	if (mlx4_is_slave(dev)) {
4817 		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4818 			  work->slave);
4819 		goto out;
4820 	}
4821 
4822 	mailbox = mlx4_alloc_cmd_mailbox(dev);
4823 	if (IS_ERR(mailbox))
4824 		goto out;
4825 	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4826 		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4827 			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4828 			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4829 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4830 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4831 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4832 	else if (!work->vlan_id)
4833 		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4834 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4835 	else
4836 		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4837 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4838 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4839 
4840 	upd_context = mailbox->buf;
4841 	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
4842 
4843 	spin_lock_irq(mlx4_tlock(dev));
4844 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4845 		spin_unlock_irq(mlx4_tlock(dev));
4846 		if (qp->com.owner == work->slave) {
4847 			if (qp->com.from_state != RES_QP_HW ||
4848 			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
4849 			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4850 			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4851 				spin_lock_irq(mlx4_tlock(dev));
4852 				continue;
4853 			}
4854 			port = (qp->sched_queue >> 6 & 1) + 1;
4855 			if (port != work->port) {
4856 				spin_lock_irq(mlx4_tlock(dev));
4857 				continue;
4858 			}
4859 			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4860 				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4861 			else
4862 				upd_context->primary_addr_path_mask =
4863 					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4864 			if (work->vlan_id == MLX4_VGT) {
4865 				upd_context->qp_context.param3 = qp->param3;
4866 				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4867 				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4868 				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4869 				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4870 				upd_context->qp_context.pri_path.feup = qp->feup;
4871 				upd_context->qp_context.pri_path.sched_queue =
4872 					qp->sched_queue;
4873 			} else {
4874 				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4875 				upd_context->qp_context.pri_path.vlan_control = vlan_control;
4876 				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4877 				upd_context->qp_context.pri_path.fvl_rx =
4878 					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4879 				upd_context->qp_context.pri_path.fl =
4880 					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4881 				upd_context->qp_context.pri_path.feup =
4882 					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4883 				upd_context->qp_context.pri_path.sched_queue =
4884 					qp->sched_queue & 0xC7;
4885 				upd_context->qp_context.pri_path.sched_queue |=
4886 					((work->qos & 0x7) << 3);
4887 			}
4888 
4889 			err = mlx4_cmd(dev, mailbox->dma,
4890 				       qp->local_qpn & 0xffffff,
4891 				       0, MLX4_CMD_UPDATE_QP,
4892 				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4893 			if (err) {
4894 				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4895 					  work->slave, port, qp->local_qpn, err);
4896 				errors++;
4897 			}
4898 		}
4899 		spin_lock_irq(mlx4_tlock(dev));
4900 	}
4901 	spin_unlock_irq(mlx4_tlock(dev));
4902 	mlx4_free_cmd_mailbox(dev, mailbox);
4903 
4904 	if (errors)
4905 		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4906 			 errors, work->slave, work->port);
4907 
4908 	/* unregister previous vlan_id if needed and we had no errors
4909 	 * while updating the QPs
4910 	 */
4911 	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4912 	    NO_INDX != work->orig_vlan_ix)
4913 		__mlx4_unregister_vlan(&work->priv->dev, work->port,
4914 				       work->orig_vlan_id);
4915 out:
4916 	kfree(work);
4917 	return;
4918 }
4919