/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/export.h>

#include "mlx4.h"

static const u8 zero_gid[16];	/* automatically initialized to 0 */

int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
	return 1 << dev->oper_log_mgm_entry_size;
}

int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
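
/*
 * A worked example of the sizing above (an illustration, assuming a
 * 128-byte MGM entry, i.e. oper_log_mgm_entry_size == 7): the entry
 * spans 128 / 16 = 8 sixteen-byte lines, the first two of which hold
 * the members_count/next_gid_index words and the 16-byte GID, leaving
 * 4 * (8 - 2) = 24 four-byte QPN slots per entry.
 */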

static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
					struct mlx4_cmd_mailbox *mailbox,
					u32 size,
					u64 *reg_id)
{
	u64 imm;
	int err = 0;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;
	*reg_id = imm;

	return err;
}

static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
{
	int err = 0;

	err = mlx4_cmd(dev, regid, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	return err;
}

static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
			      struct mlx4_cmd_mailbox *mailbox)
{
	u32 in_mod;

	in_mod = (u32) port << 16 | steer << 1;
	return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}
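
/*
 * A sketch of the input encoding above, inferred from this helper rather
 * than from a documented interface: the input modifier packs the port
 * number into bits 23:16 and the steer type into bit 1, while the 0x1
 * op_mod appears to select the per-port default (promiscuous) entry
 * instead of an indexed MCG entry; e.g. port 2 with steer 0 gives
 * in_mod == 0x20000.
 */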

static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 u16 *hash, u8 op_mod)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);

	if (!err)
		*hash = imm;

	return err;
}

static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
					      enum mlx4_steer_type steer,
					      u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_promisc_qp *pqp;

	if (port < 1 || port > dev->caps.num_ports)
		return NULL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		if (pqp->qpn == qpn)
			return pqp;
	}
	/* not found */
	return NULL;
}

/*
 * Add a new entry to the steering data structure.
 * All promisc QPs should be added as well.
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted into the duplicates list
	 */
	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add an already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps */
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}

/* update the data structures with existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (!pqp)
		return 0; /* nothing to do */

	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
		return -EINVAL;
	}

	/* the given qpn is listed as a promisc qpn;
	 * we need to add it as a duplicate to this entry
	 * for future reference */
	list_for_each_entry(dqp, &entry->duplicates, list) {
		if (qpn == dqp->qpn)
			return 0; /* qp is already duplicated */
	}

	/* add the qp as a duplicate on this index */
	dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
	if (!dqp)
		return -ENOMEM;
	dqp->qpn = qpn;
	list_add_tail(&dqp->list, &entry->duplicates);

	return 0;
}

/* Check whether a qpn is a duplicate on a steering entry.
 * If so, it should not be removed from the mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	/* if qp is not promisc, it cannot be duplicated */
	if (!get_promisc_qp(dev, port, steer, qpn))
		return false;

	/* The qp is promisc, so it is a duplicate on this index.
	 * Find the index entry and remove the duplicate */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
		return false;
	}
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}
	return true;
}

/* Returns true if all the QPs != tqpn contained in this entry
 * are Promisc QPs. Returns false otherwise.
 */
static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 tqpn,
				   u32 *members_count)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 m_count;
	bool ret = false;
	int i;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	m_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count)
		*members_count = m_count;

	for (i = 0; i < m_count; i++) {
		u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;

		if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	ret = true;
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 members_count;
	bool ret = false;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	if (!promisc_steering_entry(dev, port, steer, index,
				    tqpn, &members_count))
		goto out;

	/* All the qps currently registered for this entry are promiscuous;
	 * check for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates) ||
			    members_count == 1) {
				struct mlx4_promisc_qp *pqp, *tmp_pqp;
				/* If there is only 1 entry in duplicates then
				 * this is the QP we want to delete, so go over
				 * the list and delete the entries.
				 */
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			} else {
				/* This entry contains duplicates so it should not be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	return ret;
}

static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
			  enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	u32 prot;
	int i;
	bool found;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mutex_lock(&priv->mcg_table.mutex);

	if (get_promisc_qp(dev, port, steer, qpn)) {
		err = 0;  /* Nothing to do, already exists */
		goto out_mutex;
	}

	pqp = kmalloc(sizeof(*pqp), GFP_KERNEL);
	if (!pqp) {
		err = -ENOMEM;
		goto out_mutex;
	}
	pqp->qpn = qpn;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
		/* The promisc QP needs to be added for each one of the steering
		 * entries. If it already exists, it needs to be added as
		 * a duplicate for this entry.
		 */
		list_for_each_entry(entry,
				    &s_steer->steer_entries[steer],
				    list) {
			err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;

			members_count = be32_to_cpu(mgm->members_count) &
					0xffffff;
			prot = be32_to_cpu(mgm->members_count) >> 30;
			found = false;
			for (i = 0; i < members_count; i++) {
				if ((be32_to_cpu(mgm->qp[i]) &
				     MGM_QPN_MASK) == qpn) {
					/* Entry already exists.
					 * Add to duplicates.
					 */
					dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
					if (!dqp) {
						err = -ENOMEM;
						goto out_mailbox;
					}
					dqp->qpn = qpn;
					list_add_tail(&dqp->list,
						      &entry->duplicates);
					found = true;
				}
			}
			if (!found) {
				/* Need to add the qpn to the mgm */
				if (members_count ==
				    dev->caps.num_qp_per_mgm) {
					/* entry is full */
					err = -ENOMEM;
					goto out_mailbox;
				}
				mgm->qp[members_count++] =
					cpu_to_be32(qpn & MGM_QPN_MASK);
				mgm->members_count =
					cpu_to_be32(members_count |
						    (prot << 30));
				err = mlx4_WRITE_ENTRY(dev, entry->index,
						       mailbox);
				if (err)
					goto out_mailbox;
			}
		}
	}

	/* add the new qpn to the list of promisc qps */
	list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	/* now need to add all the promisc qps to the default entry */
	memset(mgm, 0, sizeof(*mgm));
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) {
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* entry is full */
			err = -ENOMEM;
			goto out_list;
		}
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	}
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_list;

	mlx4_free_cmd_mailbox(dev, mailbox);
	mutex_unlock(&priv->mcg_table.mutex);
	return 0;

out_list:
	list_del(&pqp->list);
out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
	kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
			     enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	bool found;
	bool back_to_list = false;
	int i;
	int err;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	mutex_lock(&priv->mcg_table.mutex);

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (unlikely(!pqp)) {
		mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
		/* nothing to do */
		err = 0;
		goto out_mutex;
	}

	/* remove from the list of promisc qps */
	list_del(&pqp->list);

	/* set the default entry not to include the removed one */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		back_to_list = true;
		goto out_list;
	}
	mgm = mailbox->buf;
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_mailbox;

	if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
		/* Remove the QP from all the steering entries */
		list_for_each_entry_safe(entry, tmp_entry,
					 &s_steer->steer_entries[steer],
					 list) {
			found = false;
			list_for_each_entry(dqp, &entry->duplicates, list) {
				if (dqp->qpn == qpn) {
					found = true;
					break;
				}
			}
			if (found) {
				/* A duplicate, no need to change the MGM,
				 * only update the duplicates list
				 */
				list_del(&dqp->list);
				kfree(dqp);
			} else {
				int loc = -1;

				err = mlx4_READ_ENTRY(dev,
						      entry->index,
						      mailbox);
				if (err)
					goto out_mailbox;
				members_count =
					be32_to_cpu(mgm->members_count) &
					0xffffff;
				if (!members_count) {
					mlx4_warn(dev, "QP %06x wasn't found in entry %x, mcount=0. Deleting entry...\n",
						  qpn, entry->index);
					list_del(&entry->list);
					kfree(entry);
					continue;
				}

				for (i = 0; i < members_count; ++i)
					if ((be32_to_cpu(mgm->qp[i]) &
					     MGM_QPN_MASK) == qpn) {
						loc = i;
						break;
					}

				if (loc < 0) {
					mlx4_err(dev, "QP %06x wasn't found in entry %d\n",
						 qpn, entry->index);
					err = -EINVAL;
					goto out_mailbox;
				}

				/* Copy the last QP in this MGM
				 * over the removed QP
				 */
				mgm->qp[loc] = mgm->qp[members_count - 1];
				mgm->qp[members_count - 1] = 0;
				mgm->members_count =
					cpu_to_be32(--members_count |
						    (MLX4_PROT_ETH << 30));

				err = mlx4_WRITE_ENTRY(dev,
						       entry->index,
						       mailbox);
				if (err)
					goto out_mailbox;
			}
		}
	}

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
	if (back_to_list)
		list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	else
		kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

/*
 * Caller must hold MCG table semaphore.  gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	if (0)
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

	*index = hash;
	*prev = -1;

	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM\n");
				err = -EINVAL;
			}
			return err;
		}

		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
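
/*
 * Note on the chain encoding used above and in the attach/detach paths
 * below: next_gid_index stores the index of the next entry shifted left
 * by 6 bits, so a stored value of (5 << 6) == 0x140 links to AMGM entry
 * 5 and a decoded value of 0 terminates the hash chain.
 */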

static const u8 __promisc_mode[] = {
	[MLX4_FS_REGULAR]	= 0x0,
	[MLX4_FS_ALL_DEFAULT]	= 0x1,
	[MLX4_FS_MC_DEFAULT]	= 0x3,
	[MLX4_FS_UC_SNIFFER]	= 0x4,
	[MLX4_FS_MC_SNIFFER]	= 0x5,
};

int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
				    enum mlx4_net_trans_promisc_mode flow_type)
{
	if (flow_type >= MLX4_FS_MODE_NUM) {
		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
		return -EINVAL;
	}
	return __promisc_mode[flow_type];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);

static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
				  struct mlx4_net_trans_rule_hw_ctrl *hw)
{
	u8 flags = 0;

	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
	flags |= ctrl->exclusive ? (1 << 2) : 0;
	flags |= ctrl->allow_loopback ? (1 << 3) : 0;

	hw->flags = flags;
	hw->type = __promisc_mode[ctrl->promisc_mode];
	hw->prio = cpu_to_be16(ctrl->priority);
	hw->port = ctrl->port;
	hw->qpn = cpu_to_be32(ctrl->qpn);
}
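
/*
 * A sketch of the resulting control segment, using only the encodings
 * visible in trans_rule_ctrl_to_hw() above: bit 0 of hw->flags selects
 * LIFO queueing, bit 2 marks the rule exclusive and bit 3 allows
 * loopback, so a non-exclusive FIFO rule with loopback allowed ends up
 * with hw->flags == (1 << 3) == 0x8.
 */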

const u16 __sw_id_hw[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH]     = 0xE001,
	[MLX4_NET_TRANS_RULE_ID_IB]      = 0xE005,
	[MLX4_NET_TRANS_RULE_ID_IPV6]    = 0xE003,
	[MLX4_NET_TRANS_RULE_ID_IPV4]    = 0xE002,
	[MLX4_NET_TRANS_RULE_ID_TCP]     = 0xE004,
	[MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006,
	[MLX4_NET_TRANS_RULE_ID_VXLAN]	 = 0xE008
};

int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
				  enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}
	return __sw_id_hw[id];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);

static const int __rule_hw_sz[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH] =
		sizeof(struct mlx4_net_trans_rule_hw_eth),
	[MLX4_NET_TRANS_RULE_ID_IB] =
		sizeof(struct mlx4_net_trans_rule_hw_ib),
	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
	[MLX4_NET_TRANS_RULE_ID_IPV4] =
		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
	[MLX4_NET_TRANS_RULE_ID_TCP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
	[MLX4_NET_TRANS_RULE_ID_UDP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
	[MLX4_NET_TRANS_RULE_ID_VXLAN] =
		sizeof(struct mlx4_net_trans_rule_hw_vxlan)
};

int mlx4_hw_rule_sz(struct mlx4_dev *dev,
		    enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}

	return __rule_hw_sz[id];
}
EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);

static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
			    struct _rule_hw *rule_hw)
{
	if (mlx4_hw_rule_sz(dev, spec->id) < 0)
		return -EINVAL;
	memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
	rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;

	switch (spec->id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
		memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
		       ETH_ALEN);
		memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
		memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
		       ETH_ALEN);
		if (spec->eth.ether_type_enable) {
			rule_hw->eth.ether_type_enable = 1;
			rule_hw->eth.ether_type = spec->eth.ether_type;
		}
		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_IB:
		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
		break;

	case MLX4_NET_TRANS_RULE_ID_IPV6:
		return -EOPNOTSUPP;

	case MLX4_NET_TRANS_RULE_ID_IPV4:
		rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
		rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
		rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
		rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
		rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
		rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_VXLAN:
		rule_hw->vxlan.vni =
			cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
		rule_hw->vxlan.vni_mask =
			cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
		break;

	default:
		return -EINVAL;
	}

	return __rule_hw_sz[spec->id];
}
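
/*
 * A small worked example of the VXLAN conversion above: a VNI of 0x1234
 * arrives as cpu_to_be32(0x1234) in the spec and is rewritten as
 * cpu_to_be32(0x1234 << 8) == cpu_to_be32(0x123400); the shift suggests
 * the device expects the 24-bit VNI left-aligned within the 32-bit
 * field, though that layout is inferred from this code alone.
 */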

static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
			  struct mlx4_net_trans_rule *rule)
{
#define BUF_SIZE 256
	struct mlx4_spec_list *cur;
	char buf[BUF_SIZE];
	int len = 0;

	mlx4_err(dev, "%s", str);
	len += snprintf(buf + len, BUF_SIZE - len,
			"port = %d prio = 0x%x qp = 0x%x ",
			rule->port, rule->priority, rule->qpn);

	list_for_each_entry(cur, &rule->list, list) {
		switch (cur->id) {
		case MLX4_NET_TRANS_RULE_ID_ETH:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dmac = %pM ", &cur->eth.dst_mac);
			if (cur->eth.ether_type)
				len += snprintf(buf + len, BUF_SIZE - len,
						"ethertype = 0x%x ",
						be16_to_cpu(cur->eth.ether_type));
			if (cur->eth.vlan_id)
				len += snprintf(buf + len, BUF_SIZE - len,
						"vlan-id = %d ",
						be16_to_cpu(cur->eth.vlan_id));
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV4:
			if (cur->ipv4.src_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-ip = %pI4 ",
						&cur->ipv4.src_ip);
			if (cur->ipv4.dst_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-ip = %pI4 ",
						&cur->ipv4.dst_ip);
			break;

		case MLX4_NET_TRANS_RULE_ID_TCP:
		case MLX4_NET_TRANS_RULE_ID_UDP:
			if (cur->tcp_udp.src_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-port = %d ",
						be16_to_cpu(cur->tcp_udp.src_port));
			if (cur->tcp_udp.dst_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-port = %d ",
						be16_to_cpu(cur->tcp_udp.dst_port));
			break;

		case MLX4_NET_TRANS_RULE_ID_IB:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid = %pI6\n", cur->ib.dst_gid);
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid-mask = %pI6\n",
					cur->ib.dst_gid_msk);
			break;

		case MLX4_NET_TRANS_RULE_ID_VXLAN:
			len += snprintf(buf + len, BUF_SIZE - len,
					"VNID = %d ", be32_to_cpu(cur->vxlan.vni));
			break;
		case MLX4_NET_TRANS_RULE_ID_IPV6:
			break;

		default:
			break;
		}
	}
	len += snprintf(buf + len, BUF_SIZE - len, "\n");
	mlx4_err(dev, "%s", buf);

	if (len >= BUF_SIZE)
		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
}

int mlx4_flow_attach(struct mlx4_dev *dev,
		     struct mlx4_net_trans_rule *rule, u64 *reg_id)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_spec_list *cur;
	u32 size = 0;
	int ret;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (!mlx4_qp_lookup(dev, rule->qpn)) {
		mlx4_err_rule(dev, "QP doesn't exist\n", rule);
		ret = -EINVAL;
		goto out;
	}

	trans_rule_ctrl_to_hw(rule, mailbox->buf);

	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

	list_for_each_entry(cur, &rule->list, list) {
		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
		if (ret < 0)
			goto out;

		size += ret;
	}

	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
	if (ret == -ENOMEM) {
		mlx4_err_rule(dev,
			      "mcg table is full. Failed to register network rule\n",
			      rule);
	} else if (ret) {
		if (ret == -ENXIO) {
			if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
				mlx4_err_rule(dev,
					      "DMFS is not enabled, "
					      "failed to register network rule\n",
					      rule);
			else
				mlx4_err_rule(dev,
					      "Rule exceeds the dmfs_high_rate_mode limitations, "
					      "failed to register network rule\n",
					      rule);

		} else {
			mlx4_err_rule(dev, "Failed to register network rule\n", rule);
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_attach);

int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
	if (err)
		mlx4_err(dev, "Failed to detach network rule, registration id = 0x%llx\n",
			 reg_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
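
/*
 * A minimal usage sketch for the attach/detach pair above (a
 * hypothetical caller, mirroring what mlx4_tunnel_steer_add() below
 * does): build a rule with a single Ethernet spec, attach it, and keep
 * reg_id for the eventual detach.
 *
 *	struct mlx4_spec_list spec = { .id = MLX4_NET_TRANS_RULE_ID_ETH };
 *	struct mlx4_net_trans_rule rule = {
 *		.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
 *		.exclusive	= 0,
 *		.allow_loopback	= 1,
 *		.promisc_mode	= MLX4_FS_REGULAR,
 *		.port		= port,
 *		.qpn		= qpn,
 *	};
 *	u64 reg_id;
 *	int err;
 *
 *	INIT_LIST_HEAD(&rule.list);
 *	memcpy(spec.eth.dst_mac, mac, ETH_ALEN);
 *	memset(spec.eth.dst_mac_msk, 0xff, ETH_ALEN);
 *	list_add_tail(&spec.list, &rule.list);
 *
 *	err = mlx4_flow_attach(dev, &rule, &reg_id);
 *	if (!err)
 *		err = mlx4_flow_detach(dev, reg_id);
 */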

int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
			  int port, int qpn, u16 prio, u64 *reg_id)
{
	int err;
	struct mlx4_spec_list spec_eth_outer = { {NULL} };
	struct mlx4_spec_list spec_vxlan     = { {NULL} };
	struct mlx4_spec_list spec_eth_inner = { {NULL} };

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
	};

	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	rule.port = port;
	rule.qpn = qpn;
	rule.priority = prio;
	INIT_LIST_HEAD(&rule.list);

	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;	/* any vxlan header */
	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;	/* any inner eth header */

	list_add_tail(&spec_eth_outer.list, &rule.list);
	list_add_tail(&spec_vxlan.list,     &rule.list);
	list_add_tail(&spec_eth_inner.list, &rule.list);

	err = mlx4_flow_attach(dev, &rule, reg_id);
	return err;
}
EXPORT_SYMBOL(mlx4_tunnel_steer_add);

int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
				      u32 max_range_qpn)
{
	int err;
	u64 in_param;

	in_param = ((u64) min_range_qpn) << 32;
	in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;

	err = mlx4_cmd(dev, in_param, 0, 0,
		       MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);
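
/*
 * Example of the in_param packing above: min_range_qpn == 0x40 and
 * max_range_qpn == 0x7f combine into in_param == 0x000000400000007f,
 * i.e. the lower bound of the range travels in the upper 32 bits and
 * the upper bound in the lower 32 bits.
 */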

int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index = -1, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof(*mgm));
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full\n", index);
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH && index != -1) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			err = new_steering_entry(dev, port, steer,
						 index, qp->qpn);
		else
			err = existing_steering_entry(dev, port, steer,
						      index, qp->qpn);
	}
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d\n",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc = -1;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* If this QP is also a promisc QP, it must not be removed from the
	 * MGM as long as at least one non-promisc QP is still attached to
	 * this MCG
	 */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
	    !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			loc = i;
			break;
		}

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* copy the last QP in this MGM over the removed QP */
	mgm->qp[loc] = mgm->qp[members_count - 1];
	mgm->qp[members_count - 1] = 0;
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;

		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;

		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		/* In case the device is under an error, return success as a
		 * closing command
		 */
		err = 0;
	return err;
}

static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
			  u8 gid[16], u8 attach, u8 block_loopback,
			  enum mlx4_protocol prot)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int qpn;

	if (!mlx4_is_mfunc(dev))
		return -EBADF;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, gid, 16);
	qpn = qp->qpn;
	qpn |= (prot << 28);		/* protocol rides above the QPN */
	if (attach && block_loopback)
		qpn |= (1 << 31);	/* request loopback blocking */

	err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err && !attach &&
	    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		err = 0;
	return err;
}

int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			      u8 gid[16], u8 port,
			      int block_mcast_loopback,
			      enum mlx4_protocol prot, u64 *reg_id)
{
	struct mlx4_spec_list spec = { {NULL} };
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_NIC,
	};

	rule.allow_loopback = !block_mcast_loopback;
	rule.port = port;
	rule.qpn = qp->qpn;
	INIT_LIST_HEAD(&rule.list);

	switch (prot) {
	case MLX4_PROT_ETH:
		spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
		/* for Ethernet the destination MAC rides in gid[10..15] */
		memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
		memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		break;

	case MLX4_PROT_IB_IPV6:
		spec.id = MLX4_NET_TRANS_RULE_ID_IB;
		memcpy(spec.ib.dst_gid, gid, 16);
		memset(&spec.ib.dst_gid_msk, 0xff, 16);
		break;
	default:
		return -EINVAL;
	}
	list_add_tail(&spec.list, &rule.list);

	return mlx4_flow_attach(dev, &rule, reg_id);
}

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol prot, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 1,
					      block_mcast_loopback, prot);
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_mcast_loopback, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_mcast_loopback,
						 prot, reg_id);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);

int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);

int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
				u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
	};

	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p != 0)
		return -1;

	rule.promisc_mode = mode;
	rule.port = port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);
	mlx4_info(dev, "going promisc on %x\n", port);

	return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);

int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode)
{
	int ret;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p == 0)
		return -1;

	ret = mlx4_flow_detach(dev, *regid_p);
	if (ret == 0)
		*regid_p = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);

int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);

int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
	enum mlx4_steer_type steer = vhcr->in_modifier;

	if (port < 0)
		return -EINVAL;

	/* Promiscuous unicast is not allowed in mfunc */
	if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}

static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}
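
/*
 * This encoding matches what mlx4_PROMISC_wrapper() above decodes on the
 * master side: the QPN travels in the low 32 bits of in_param, the port
 * in bits 63:62, the steer type in the input modifier and the add/remove
 * flag in op_mod.
 */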

int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);

int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* No need for mcg_table when FW manages the MCG table */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return 0;
	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
			       dev->caps.num_amgms - 1, 0, 0);
	if (err)
		return err;

	mutex_init(&priv->mcg_table.mutex);

	return 0;
}

void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}