/*
 * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

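/* Allocate a free entry from the ULD MSI-X bitmap; returns the index or
 * -ENOSPC if no vector is left.
 */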
static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD. All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

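/* Allocate the ULD's ingress queues (and concentrator queues, if any),
 * spreading them evenly across the adapter's ports.
 */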
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	struct sge *s = &adap->sge;
	unsigned int per_chan;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			if (bmap_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}

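/* Bring up the rx queues sized by cfg_queues_uld() and, for RDMA, tell the
 * firmware to route control queue completions to the new response queues.
 */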
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}

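/* Work out how many rx and concentrator queues a ULD gets and allocate the
 * bookkeeping (rxq_info) for them; the queues themselves are brought up
 * later by setup_sge_queues_uld().
 */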
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

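/* Request one MSI-X vector per ULD rx queue, unwinding on failure */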
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;
unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}

static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

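/* Enable napi (if the queue has a handler) and arm the interrupt/holdoff
 * timer for a response queue.
 */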
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler)
		napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

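/* Drop a reference on the shared offload Tx queue set and free it when the
 * last user goes away.
 */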
static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

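/* CXGB4_TX_OFLD queues are shared among the offload ULDs: allocate them for
 * the first user and just take another reference for subsequent ones.
 */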
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;

	i = min_t(int, uld_info->ntxq, num_online_cpus());
	txq_info->ntxq = roundup(i, adap->params.nports);

	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

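/* Export the ULD's rx and concentrator queue ids through the lld info */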
static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
}

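/* Allocate per-adapter ULD state and the rx/tx queue info pointer arrays */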
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kzalloc(CXGB4_TX_MAX *
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

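/* Fill in the adapter-wide fields of the lld info passed to a ULD at attach */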
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->ntxq = adap->sge.ofldqsets;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
}

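/* Hand the lld info to the ULD's add() callback and record the handle it
 * returns; notify the ULD right away if the adapter is already up.
 */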
static int uld_attach(struct adapter *adap, unsigned int uld)
{
	struct cxgb4_lld_info lli;
	void *handle;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

	return 0;
}

/* cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type. Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
		       const struct cxgb4_uld_info *p)
{
	unsigned int adap_idx = 0;
	struct adapter *adap;
	int ret = 0;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add) {
			ret = -EBUSY;
			goto free_irq;
		}
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		ret = uld_attach(adap, type);
		if (ret)
			goto free_txq;
		adap_idx++;
	}
	mutex_unlock(&uld_mutex);
	return 0;

free_txq:
	release_sge_txq_uld(adap, type);
free_irq:
	if (adap->flags & FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:

	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		if (!adap_idx)
			break;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
		adap_idx--;
	}
	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);