/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "cxgb4.h"
#include "t4_regs.h"
#include "l2t.h"
#include "t4fw_api.h"
#include "cxgb4_filter.h"

static inline bool is_field_set(u32 val, u32 mask)
{
	return val || mask;
}

static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
{
	return !(conf & conf_mask) && is_field_set(val, mask);
}
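
/* For example: if FCOE_F is not set in the card's compressed filter
 * tuple (fconf below), then any specification with a non-zero
 * fs->val.fcoe or fs->mask.fcoe is flagged by unsupported() and
 * validate_filter() rejects it with -EOPNOTSUPP.
 */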

/* Validate filter spec against configuration done on the card. */
static int validate_filter(struct net_device *dev,
			   struct ch_filter_specification *fs)
{
	struct adapter *adapter = netdev2adap(dev);
	u32 fconf, iconf;

	/* Check for unconfigured fields being used. */
	fconf = adapter->params.tp.vlan_pri_map;
	iconf = adapter->params.tp.ingress_config;

	if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
	    unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
	    unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
	    unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
			fs->mask.ethtype) ||
	    unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
	    unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
			fs->mask.matchtype) ||
	    unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
	    unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
			fs->mask.pfvf_vld) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
			fs->mask.ovlan_vld) ||
	    unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
		return -EOPNOTSUPP;

	/* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
	 * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
	 * in TP_INGRESS_CONFIG.  Hence the somewhat crazy checks
	 * below.  Additionally, since the T4 firmware interface also
	 * carries that overlap, we need to translate any PF/VF
	 * specification into that internal format below.
	 */
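	/* Concretely (illustrative numbers): with VNIC_F set, a request to
	 * match PF 2 / VF 5 is packed into the Outer VLAN field later in
	 * __cxgb4_set_filter() as (2 << 13) | 5 == 0x4005, which is why a
	 * spec can't ask for both a PF/VF match and an Outer VLAN valid
	 * match at the same time.
	 */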
	if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
	    is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
		return -EOPNOTSUPP;
	if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
	    (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
	     (iconf & VNIC_F)))
		return -EOPNOTSUPP;
	if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
		return -ERANGE;
	fs->mask.pf &= 0x7;
	fs->mask.vf &= 0x7f;

	/* If the user is requesting that the filter action loop
	 * matching packets back out one of our ports, make sure that
	 * the egress port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;

	/* Don't allow various trivially obvious bogus out-of-range values... */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;

	/* T4 doesn't support removing VLAN Tags for loopback filters. */
	if (is_t4(adapter->params.chip) &&
	    fs->action == FILTER_SWITCH &&
	    (fs->newvlan == VLAN_REMOVE ||
	     fs->newvlan == VLAN_REWRITE))
		return -EOPNOTSUPP;

	return 0;
}

static int get_filter_steerq(struct net_device *dev,
			     struct ch_filter_specification *fs)
{
	struct adapter *adapter = netdev2adap(dev);
	int iq;

	/* If the user has requested steering matching Ingress Packets
	 * to a specific Queue Set, we need to make sure it's in range
	 * for the port and map that into the Absolute Queue ID of the
	 * Queue Set's Response Queue.
	 */
	if (!fs->dirsteer) {
		if (fs->iq)
			return -EINVAL;
		iq = 0;
	} else {
		struct port_info *pi = netdev_priv(dev);

		/* If the iq id is greater than the number of qsets,
		 * then assume it is an absolute qid.
		 */
		if (fs->iq < pi->nqsets)
			iq = adapter->sge.ethrxq[pi->first_qset +
						 fs->iq].rspq.abs_id;
		else
			iq = fs->iq;
	}

	return iq;
}
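
/* A sketch of the mapping above, with illustrative numbers: on a port
 * whose first_qset is 8, a spec with dirsteer set and fs->iq = 2
 * (< nqsets) resolves to adapter->sge.ethrxq[10].rspq.abs_id, while
 * fs->iq = 100 (>= nqsets) is passed through unchanged as an absolute
 * queue id.
 */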

static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
{
	spin_lock_bh(&t->ftid_lock);

	if (test_bit(fidx, t->ftid_bmap)) {
		spin_unlock_bh(&t->ftid_lock);
		return -EBUSY;
	}

	if (family == PF_INET)
		__set_bit(fidx, t->ftid_bmap);
	else
		/* An IPv6 filter occupies four slots, so reserve an
		 * order-2 (2^2-bit) region of the bitmap.
		 */
		bitmap_allocate_region(t->ftid_bmap, fidx, 2);

	spin_unlock_bh(&t->ftid_lock);
	return 0;
}

static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
{
	spin_lock_bh(&t->ftid_lock);
	if (family == PF_INET)
		__clear_bit(fidx, t->ftid_bmap);
	else
		bitmap_release_region(t->ftid_bmap, fidx, 2);
	spin_unlock_bh(&t->ftid_lock);
}

/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	struct sk_buff *skb;
	unsigned int len;

	len = sizeof(*fwr);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = __skb_put(skb, len);
	t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	fwr = __skb_put_zero(skb, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(f->tid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Return an error number if the indicated filter isn't writable ... */
int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing so, like the filter being locked, currently
 * pending in another operation, etc.
 */
int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter has loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

void clear_all_filters(struct adapter *adapter)
{
	unsigned int i;

	if (adapter->tids.ftid_tab) {
		struct filter_entry *f = &adapter->tids.ftid_tab[0];
		unsigned int max_ftid = adapter->tids.nftids +
					adapter->tids.nsftids;

		for (i = 0; i < max_ftid; i++, f++)
			if (f->valid || f->pending)
				clear_filter(adapter, f);
	}
}

/* Fill up default masks for set match fields. */
static void fill_default_mask(struct ch_filter_specification *fs)
{
	unsigned int lip = 0, lip_mask = 0;
	unsigned int fip = 0, fip_mask = 0;
	unsigned int i;

	if (fs->val.iport && !fs->mask.iport)
		fs->mask.iport |= ~0;
	if (fs->val.fcoe && !fs->mask.fcoe)
		fs->mask.fcoe |= ~0;
	if (fs->val.matchtype && !fs->mask.matchtype)
		fs->mask.matchtype |= ~0;
	if (fs->val.macidx && !fs->mask.macidx)
		fs->mask.macidx |= ~0;
	if (fs->val.ethtype && !fs->mask.ethtype)
		fs->mask.ethtype |= ~0;
	if (fs->val.ivlan && !fs->mask.ivlan)
		fs->mask.ivlan |= ~0;
	if (fs->val.ovlan && !fs->mask.ovlan)
		fs->mask.ovlan |= ~0;
	if (fs->val.frag && !fs->mask.frag)
		fs->mask.frag |= ~0;
	if (fs->val.tos && !fs->mask.tos)
		fs->mask.tos |= ~0;
	if (fs->val.proto && !fs->mask.proto)
		fs->mask.proto |= ~0;

	for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
		lip |= fs->val.lip[i];
		lip_mask |= fs->mask.lip[i];
		fip |= fs->val.fip[i];
		fip_mask |= fs->mask.fip[i];
	}

	if (lip && !lip_mask)
		memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));

	if (fip && !fip_mask)
		memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));

	if (fs->val.lport && !fs->mask.lport)
		fs->mask.lport = ~0;
	if (fs->val.fport && !fs->mask.fport)
		fs->mask.fport = ~0;
}
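
/* For example (illustrative values): a specification arriving with
 * fs->val.lport = 80 and fs->mask.lport = 0 leaves fill_default_mask()
 * with fs->mask.lport set to all-ones, i.e. "match local port 80
 * exactly"; fields whose value is zero keep their zero (wildcard) mask.
 */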

/* Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int max_fidx, fidx;
	struct filter_entry *f;
	u32 iconf;
	int iq, ret;

	max_fidx = adapter->tids.nftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	fill_default_mask(fs);

	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	/* IPv6 filters occupy four slots and must be aligned on
	 * four-slot boundaries.  IPv4 filters only occupy a single
	 * slot and have no alignment requirements but writing a new
	 * IPv4 filter into the middle of an existing IPv6 filter
	 * requires clearing the old IPv6 filter and hence we prevent
	 * insertion.
	 */
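	/* For instance (illustrative layout): if slots 4..7 hold one IPv6
	 * filter, a new IPv4 filter may use any other free slot but not
	 * slots 4..7, and a new IPv6 filter may only start at slot
	 * indices 0, 4, 8, ...
	 */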
	if (fs->type == 0) { /* IPv4 */
		/* If our IPv4 filter isn't being written to a
		 * multiple of four filter index and there's an IPv6
		 * filter at the multiple of 4 base slot, then we
		 * prevent insertion.
		 */
		fidx = filter_id & ~0x3;
		if (fidx != filter_id &&
		    adapter->tids.ftid_tab[fidx].fs.type) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
					fidx, fidx + 3);
				return -EINVAL;
			}
		}
	} else { /* IPv6 */
		/* Ensure that the IPv6 filter is aligned on a
		 * multiple of 4 boundary.
		 */
		if (filter_id & 0x3) {
			dev_err(adapter->pdev_dev,
				"Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
			return -EINVAL;
		}

		/* Check all except the base overlapping IPv4 filter slots. */
		for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid) {
				dev_err(adapter->pdev_dev,
					"Invalid location.  IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
					fidx);
				return -EINVAL;
			}
		}
	}

	/* Check to make sure that provided filter index is not
	 * already in use by someone else
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	fidx = filter_id + adapter->tids.ftid_base;
	ret = cxgb4_set_ftid(&adapter->tids, filter_id,
			     fs->type ? PF_INET6 : PF_INET);
	if (ret)
		return ret;

	/* Check to make sure the filter requested is writable ... */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET);
		return ret;
	}

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adapter, f);

	/* Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}

	/* Attempt to set the filter.  If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(adapter, filter_id);
	if (ret) {
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET);
		clear_filter(adapter, f);
	}

	return ret;
}

/* Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct filter_entry *f;
	unsigned int max_fidx;
	int ret;

	max_fidx = adapter->tids.nftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	f = &adapter->tids.ftid_tab[filter_id];
	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		f->ctx = ctx;
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 f->fs.type ? PF_INET6 : PF_INET);
		return del_filter_wr(adapter, filter_id);
	}

	/* If the caller has passed in a Completion Context then we need to
	 * mark it as a successful completion so they don't stall waiting
	 * for it.
	 */
	if (ctx) {
		ctx->result = 0;
		complete(&ctx->completion);
	}
	return ret;
}

int cxgb4_set_filter(struct net_device *dev, int filter_id,
		     struct ch_filter_specification *fs)
{
	struct filter_ctx ctx;
	int ret;

	init_completion(&ctx.completion);

	ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
	if (ret)
		goto out;

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	ret = ctx.result;
out:
	return ret;
}

int cxgb4_del_filter(struct net_device *dev, int filter_id)
{
	struct filter_ctx ctx;
	int ret;

	init_completion(&ctx.completion);

	ret = __cxgb4_del_filter(dev, filter_id, &ctx);
	if (ret)
		goto out;

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	ret = ctx.result;
out:
	return ret;
}
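
/* A minimal usage sketch of the two synchronous entry points above.  The
 * filter index and field values are hypothetical; fill_default_mask()
 * widens the zero masks of the non-zero match fields:
 *
 *	struct ch_filter_specification fs = { 0 };
 *	int err;
 *
 *	fs.val.proto = IPPROTO_TCP;
 *	fs.val.lport = 80;
 *	fs.action = FILTER_DROP;
 *	err = cxgb4_set_filter(netdev, 0, &fs);	// program slot 0
 *	...
 *	err = cxgb4_del_filter(netdev, 0);	// later, remove it again
 */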

/* Handle a filter write/deletion reply. */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int tid = GET_TID(rpl);
	struct filter_entry *f = NULL;
	unsigned int max_fidx;
	int idx;

	max_fidx = adap->tids.nftids + adap->tids.nsftids;
	/* Get the corresponding filter entry for this tid */
	if (adap->tids.ftid_tab) {
		/* Check this in normal filter region */
		idx = tid - adap->tids.ftid_base;
		if (idx >= max_fidx)
			return;
		f = &adap->tids.ftid_tab[idx];
		if (f->tid != tid)
			return;
	}

	/* We found the filter entry for this tid */
	if (f) {
		unsigned int ret = TCB_COOKIE_G(rpl->cookie);
		struct filter_ctx *ctx;

		/* Pull off any filter operation context attached to the
		 * filter.
		 */
		ctx = f->ctx;
		f->ctx = NULL;

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
			if (ctx)
				ctx->result = 0;
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
			if (ctx)
				ctx->result = -ENOMEM;
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
			if (ctx) {
				ctx->result = 0;
				ctx->tid = idx;
			}
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
			if (ctx)
				ctx->result = -EINVAL;
		}
		if (ctx)
			complete(&ctx->completion);
	}
}