/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define DRV_NAME "cxgbit"
#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt

#include "cxgbit.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

LIST_HEAD(cdev_list_head);
/* cdev list lock */
DEFINE_MUTEX(cdev_list_lock);

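/*
 * kref release callback: runs once the last cxgbit_put_cdev() reference is
 * dropped; releases the page-pod manager and frees the device.
 */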
void _cxgbit_free_cdev(struct kref *kref)
{
        struct cxgbit_device *cdev;

        cdev = container_of(kref, struct cxgbit_device, kref);

        cxgbi_ppm_release(cdev2ppm(cdev));
        kfree(cdev);
}

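/*
 * Derive the maximum data segment length for this adapter: bounded by the
 * adapter's iSCSI I/O length, the ULP2 packet limit, 8K, and what fits in
 * (MAX_SKB_FRAGS - 1) pages.
 */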
static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
{
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        u32 mdsl;

#define ULP2_MAX_PKT_LEN 16224
#define ISCSI_PDU_NONPAYLOAD_LEN 312
        mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
                     ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
        mdsl = min_t(u32, mdsl, 8192);
        mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);

        cdev->mdsl = mdsl;
}

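/*
 * ULD "add" callback, invoked by cxgb4 once per adapter. T4 devices are
 * rejected; otherwise allocate and initialize a cxgbit_device, set up DDP,
 * enable ISO when the firmware is new enough, and add the device to the
 * global cdev list.
 */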
static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
        struct cxgbit_device *cdev;

        if (is_t4(lldi->adapter_type))
                return ERR_PTR(-ENODEV);

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return ERR_PTR(-ENOMEM);

        kref_init(&cdev->kref);
        spin_lock_init(&cdev->np_lock);

        cdev->lldi = *lldi;

        cxgbit_set_mdsl(cdev);

        if (cxgbit_ddp_init(cdev) < 0) {
                kfree(cdev);
                return ERR_PTR(-EINVAL);
        }

        if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
                pr_info("cdev %s ddp init failed\n",
                        pci_name(lldi->pdev));

        if (lldi->fw_vers >= 0x10d2b00)
                set_bit(CDEV_ISO_ENABLE, &cdev->flags);

        spin_lock_init(&cdev->cskq.lock);
        INIT_LIST_HEAD(&cdev->cskq.list);

        mutex_lock(&cdev_list_lock);
        list_add_tail(&cdev->list, &cdev_list_head);
        mutex_unlock(&cdev_list_lock);

        pr_info("cdev %s added for iSCSI target transport\n",
                pci_name(lldi->pdev));

        return cdev;
}

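/*
 * Queue a zero-length skb on every connection's rx queue and wake its rx
 * thread, nudging each connection to notice the adapter state change and
 * shut down.
 */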
static void cxgbit_close_conn(struct cxgbit_device *cdev)
{
        struct cxgbit_sock *csk;
        struct sk_buff *skb;
        bool wakeup_thread = false;

        spin_lock_bh(&cdev->cskq.lock);
        list_for_each_entry(csk, &cdev->cskq.list, list) {
                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
                        continue;

                spin_lock_bh(&csk->rxq.lock);
                __skb_queue_tail(&csk->rxq, skb);
                if (skb_queue_len(&csk->rxq) == 1)
                        wakeup_thread = true;
                spin_unlock_bh(&csk->rxq.lock);

                if (wakeup_thread) {
                        wake_up(&csk->waitq);
                        wakeup_thread = false;
                }
        }
        spin_unlock_bh(&cdev->cskq.lock);
}

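/*
 * Detach handling: if no connections remain, drop the device from the
 * global list and put its reference; otherwise signal the remaining
 * connections to close first.
 */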
static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
{
        bool free_cdev = false;

        spin_lock_bh(&cdev->cskq.lock);
        if (list_empty(&cdev->cskq.list))
                free_cdev = true;
        spin_unlock_bh(&cdev->cskq.lock);

        if (free_cdev) {
                mutex_lock(&cdev_list_lock);
                list_del(&cdev->list);
                mutex_unlock(&cdev_list_lock);

                cxgbit_put_cdev(cdev);
        } else {
                cxgbit_close_conn(cdev);
        }
}

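/*
 * ULD state-change callback: tracks CDEV_STATE_UP, closes connections on
 * recovery and detaches the device when the adapter goes away.
 */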
static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
        struct cxgbit_device *cdev = handle;

        switch (state) {
        case CXGB4_STATE_UP:
                set_bit(CDEV_STATE_UP, &cdev->flags);
                pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_START_RECOVERY:
                clear_bit(CDEV_STATE_UP, &cdev->flags);
                cxgbit_close_conn(cdev);
                pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_DOWN:
                pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_DETACH:
                clear_bit(CDEV_STATE_UP, &cdev->flags);
                pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
                cxgbit_detach_cdev(cdev);
                break;
        default:
                pr_info("cdev %s unknown state %d.\n",
                        pci_name(cdev->lldi.pdev), state);
                break;
        }
        return 0;
}

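/*
 * Decode the DDP valid bits reported by hardware: record header and data
 * digest errors, warn on pad errors, and mark the payload as directly
 * placed (DDP'd) when no inline data accompanied the PDU.
 */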
static void
cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb,
                      u32 ddpvld)
{
        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
                pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
                pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
        }

        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
                pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld);
                pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
        }

        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
                pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld);

        if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
            (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
                pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
        }
}

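/*
 * Fold a CPL_RX_ISCSI_DDP completion (no payload gather list) into the
 * current LRO skb: record the data digest, PDU length and completion
 * status for the PDU being assembled.
 */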
static void
cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
                                                lro_cb->pdu_idx);
        struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);

        cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld));

        pdu_cb->flags |= PDUCBF_RX_STATUS;
        pdu_cb->ddigest = ntohl(cpl->ulp_crc);
        pdu_cb->pdulen = ntohs(cpl->len);

        if (pdu_cb->flags & PDUCBF_RX_HDR)
                pdu_cb->complete = true;

        lro_cb->pdu_totallen += pdu_cb->pdulen;
        lro_cb->complete = true;
        lro_cb->pdu_idx++;
}

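/*
 * Append the gather-list pages to the skb's frag array, skipping 'offset'
 * bytes of CPL header at the start of the first fragment.
 */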
static void
cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
                  unsigned int offset)
{
        u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
        u8 i;

        /* usually there's just one frag */
        __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
                             gl->frags[0].offset + offset,
                             gl->frags[0].size - offset);
        for (i = 1; i < gl->nfrags; i++)
                __skb_fill_page_desc(skb, skb_frag_idx + i,
                                     gl->frags[i].page,
                                     gl->frags[i].offset,
                                     gl->frags[i].size);

        skb_shinfo(skb)->nr_frags += gl->nfrags;

        /* get a reference to the last page, we don't own it */
        get_page(gl->frags[gl->nfrags - 1].page);
}

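/*
 * Add a CPL that carries a gather list to the LRO skb: CPL_ISCSI_HDR
 * starts a new PDU (header), CPL_ISCSI_DATA adds payload, and
 * CPL_RX_ISCSI_CMP carries the header plus completion status and
 * finishes the PDU.
 */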
static void
cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
                                                lro_cb->pdu_idx);
        u32 len, offset;

        if (op == CPL_ISCSI_HDR) {
                struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;

                offset = sizeof(struct cpl_iscsi_hdr);
                pdu_cb->flags |= PDUCBF_RX_HDR;
                pdu_cb->seq = ntohl(cpl->seq);
                len = ntohs(cpl->len);
                pdu_cb->hdr = gl->va + offset;
                pdu_cb->hlen = len;
                pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;

                if (unlikely(gl->nfrags > 1))
                        cxgbit_skcb_flags(skb) = 0;

                lro_cb->complete = false;
        } else if (op == CPL_ISCSI_DATA) {
                struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;

                offset = sizeof(struct cpl_iscsi_data);
                pdu_cb->flags |= PDUCBF_RX_DATA;
                len = ntohs(cpl->len);
                pdu_cb->dlen = len;
                pdu_cb->doffset = lro_cb->offset;
                pdu_cb->nr_dfrags = gl->nfrags;
                pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
                lro_cb->complete = false;
        } else {
                struct cpl_rx_iscsi_cmp *cpl;

                cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
                offset = sizeof(struct cpl_rx_iscsi_cmp);
                pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS);
                len = be16_to_cpu(cpl->len);
                pdu_cb->hdr = gl->va + offset;
                pdu_cb->hlen = len;
                pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
                pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc);
                pdu_cb->pdulen = ntohs(cpl->len);

                if (unlikely(gl->nfrags > 1))
                        cxgbit_skcb_flags(skb) = 0;

                cxgbit_process_ddpvld(lro_cb->csk, pdu_cb,
                                      be32_to_cpu(cpl->ddpvld));

                if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) {
                        pdu_cb->flags |= PDUCBF_RX_DDP_CMP;
                        pdu_cb->complete = true;
                } else if (pdu_cb->flags & PDUCBF_RX_DATA) {
                        pdu_cb->complete = true;
                }

                lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen;
                lro_cb->complete = true;
                lro_cb->pdu_idx++;
        }

        cxgbit_copy_frags(skb, gl, offset);

        pdu_cb->frags += gl->nfrags;
        lro_cb->offset += len;
        skb->len += len;
        skb->data_len += len;
        skb->truesize += len;
}

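/*
 * Start a new LRO skb for this connection. The zeroed headroom is used for
 * the LRO and per-PDU control blocks, and the skb holds a reference on the
 * csk for the lifetime of the aggregation.
 */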
static struct sk_buff *
cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
                    const __be64 *rsp, struct napi_struct *napi)
{
        struct sk_buff *skb;
        struct cxgbit_lro_cb *lro_cb;

        skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);
        if (unlikely(!skb))
                return NULL;

        memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);

        cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;

        lro_cb = cxgbit_skb_lro_cb(skb);

        cxgbit_get_csk(csk);

        lro_cb->csk = csk;

        return skb;
}

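/*
 * Hand a completed LRO skb to the connection's rx queue, waking the rx
 * thread if the queue was previously empty.
 */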
static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        bool wakeup_thread = false;

        spin_lock(&csk->rxq.lock);
        __skb_queue_tail(&csk->rxq, skb);
        if (skb_queue_len(&csk->rxq) == 1)
                wakeup_thread = true;
        spin_unlock(&csk->rxq.lock);

        if (wakeup_thread)
                wake_up(&csk->waitq);
}

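/*
 * Close out the LRO session for this skb: detach it from the connection
 * and the LRO manager's queue, deliver it to the rx thread and drop the
 * csk reference taken when the session was started.
 */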
static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_sock *csk = lro_cb->csk;

        csk->lro_skb = NULL;

        __skb_unlink(skb, &lro_mgr->lroq);
        cxgbit_queue_lro_skb(csk, skb);

        cxgbit_put_csk(csk);

        lro_mgr->lro_pkts++;
        lro_mgr->lro_session_cnt--;
}

static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
{
        struct sk_buff *skb;

        while ((skb = skb_peek(&lro_mgr->lroq)))
                cxgbit_lro_flush(lro_mgr, skb);
}

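/*
 * Merge an incoming iSCSI CPL into the connection's current LRO skb,
 * starting a new session (and flushing existing ones when the session
 * limit is hit) as needed. A full skb (frag count, length or PDU count)
 * is flushed before a fresh session is started. Returns 0 on success,
 * -1 if the message could not be absorbed.
 */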
static int
cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
                   const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
                   struct napi_struct *napi)
{
        struct sk_buff *skb;
        struct cxgbit_lro_cb *lro_cb;

        if (!csk) {
                pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
                goto out;
        }

        if (csk->lro_skb)
                goto add_packet;

start_lro:
        if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
                cxgbit_uld_lro_flush(lro_mgr);
                goto start_lro;
        }

        skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
        if (unlikely(!skb))
                goto out;

        csk->lro_skb = skb;

        __skb_queue_tail(&lro_mgr->lroq, skb);
        lro_mgr->lro_session_cnt++;

add_packet:
        skb = csk->lro_skb;
        lro_cb = cxgbit_skb_lro_cb(skb);

        if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
                     MAX_SKB_FRAGS) ||
                    (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
            (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
                cxgbit_lro_flush(lro_mgr, skb);
                goto start_lro;
        }

        if (gl)
                cxgbit_lro_add_packet_gl(skb, op, gl);
        else
                cxgbit_lro_add_packet_rsp(skb, op, rsp);

        lro_mgr->lro_merged++;

        return 0;

out:
        return -1;
}

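/*
 * Main ULD rx handler. iSCSI data-path CPLs are fed into the LRO path
 * where possible; everything else is copied into a normal skb and
 * dispatched through cxgbit_cplhandlers[] by opcode.
 */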
static int
cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
                          const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
                          struct napi_struct *napi)
{
        struct cxgbit_device *cdev = hndl;
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct cpl_tx_data *rpl = NULL;
        struct cxgbit_sock *csk = NULL;
        unsigned int tid = 0;
        struct sk_buff *skb;
        unsigned int op = *(u8 *)rsp;
        bool lro_flush = true;

        switch (op) {
        case CPL_ISCSI_HDR:
        case CPL_ISCSI_DATA:
        case CPL_RX_ISCSI_CMP:
        case CPL_RX_ISCSI_DDP:
        case CPL_FW4_ACK:
                lro_flush = false;
                /* fall through */
        case CPL_ABORT_RPL_RSS:
        case CPL_PASS_ESTABLISH:
        case CPL_PEER_CLOSE:
        case CPL_CLOSE_CON_RPL:
        case CPL_ABORT_REQ_RSS:
        case CPL_SET_TCB_RPL:
        case CPL_RX_DATA:
                rpl = gl ? (struct cpl_tx_data *)gl->va :
                           (struct cpl_tx_data *)(rsp + 1);
                tid = GET_TID(rpl);
                csk = lookup_tid(lldi->tids, tid);
                break;
        default:
                break;
        }

        if (csk && csk->lro_skb && lro_flush)
                cxgbit_lro_flush(lro_mgr, csk->lro_skb);

        if (!gl) {
                unsigned int len;

                if (op == CPL_RX_ISCSI_DDP) {
                        if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
                                                napi))
                                return 0;
                }

                len = 64 - sizeof(struct rsp_ctrl) - 8;
                skb = napi_alloc_skb(napi, len);
                if (!skb)
                        goto nomem;
                __skb_put(skb, len);
                skb_copy_to_linear_data(skb, &rsp[1], len);
        } else {
                if (unlikely(op != *(u8 *)gl->va)) {
                        pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
                                gl->va, be64_to_cpu(*rsp),
                                get_unaligned_be64(gl->va),
                                gl->tot_len);
                        return 0;
                }

                if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) ||
                    (op == CPL_RX_ISCSI_CMP)) {
                        if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
                                                napi))
                                return 0;
                }

#define RX_PULL_LEN 128
                skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
                if (unlikely(!skb))
                        goto nomem;
        }

        rpl = (struct cpl_tx_data *)skb->data;
        op = rpl->ot.opcode;
        cxgbit_skcb_rx_opcode(skb) = op;

        pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
                 cdev, op, rpl->ot.opcode_tid,
                 ntohl(rpl->ot.opcode_tid), skb);

        if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
                cxgbit_cplhandlers[op](cdev, skb);
        } else {
                pr_err("No handler for opcode 0x%x.\n", op);
                __kfree_skb(skb);
        }
        return 0;
nomem:
        pr_err("%s OOM bailing out.\n", __func__);
        return 1;
}

#ifdef CONFIG_CHELSIO_T4_DCB
struct cxgbit_dcb_work {
        struct dcb_app_type dcb_app;
        struct work_struct work;
};

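/*
 * Walk the connections on this port and, for those bound to the given
 * local port number whose DCB priority changed, queue a zero-length skb
 * to wake the rx thread so the connection can react to the new priority.
 */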
static void
cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
                           u8 dcb_priority, u16 port_num)
{
        struct cxgbit_sock *csk;
        struct sk_buff *skb;
        u16 local_port;
        bool wakeup_thread = false;

        spin_lock_bh(&cdev->cskq.lock);
        list_for_each_entry(csk, &cdev->cskq.list, list) {
                if (csk->port_id != port_id)
                        continue;

                if (csk->com.local_addr.ss_family == AF_INET6) {
                        struct sockaddr_in6 *sock_in6;

                        sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
                        local_port = ntohs(sock_in6->sin6_port);
                } else {
                        struct sockaddr_in *sock_in;

                        sock_in = (struct sockaddr_in *)&csk->com.local_addr;
                        local_port = ntohs(sock_in->sin_port);
                }

                if (local_port != port_num)
                        continue;

                if (csk->dcb_priority == dcb_priority)
                        continue;

                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
                        continue;

                spin_lock(&csk->rxq.lock);
                __skb_queue_tail(&csk->rxq, skb);
                if (skb_queue_len(&csk->rxq) == 1)
                        wakeup_thread = true;
                spin_unlock(&csk->rxq.lock);

                if (wakeup_thread) {
                        wake_up(&csk->waitq);
                        wakeup_thread = false;
                }
        }
        spin_unlock_bh(&cdev->cskq.lock);
}

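/*
 * Deferred handler for DCB app events: extract the iSCSI priority from the
 * IEEE or CEE app table entry, map the ifindex to a cxgbit device and push
 * the new priority to matching connections.
 */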
static void cxgbit_dcb_workfn(struct work_struct *work)
{
        struct cxgbit_dcb_work *dcb_work;
        struct net_device *ndev;
        struct cxgbit_device *cdev = NULL;
        struct dcb_app_type *iscsi_app;
        u8 priority, port_id = 0xff;

        dcb_work = container_of(work, struct cxgbit_dcb_work, work);
        iscsi_app = &dcb_work->dcb_app;

        if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
                if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
                        goto out;

                priority = iscsi_app->app.priority;
        } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
                if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
                        goto out;

                if (!iscsi_app->app.priority)
                        goto out;

                priority = ffs(iscsi_app->app.priority) - 1;
        } else {
                goto out;
        }

        pr_debug("priority for ifid %d is %u\n",
                 iscsi_app->ifindex, priority);

        ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
        if (!ndev)
                goto out;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_device(ndev, &port_id);

        dev_put(ndev);

        if (!cdev) {
                mutex_unlock(&cdev_list_lock);
                goto out;
        }

        cxgbit_update_dcb_priority(cdev, port_id, priority,
                                   iscsi_app->app.protocol);
        mutex_unlock(&cdev_list_lock);
out:
        kfree(dcb_work);
}

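/*
 * DCB event notifier: runs in atomic context, so the event is copied and
 * the real work deferred to a workqueue.
 */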
static int
cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
                       void *data)
{
        struct cxgbit_dcb_work *dcb_work;
        struct dcb_app_type *dcb_app = data;

        dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
        if (!dcb_work)
                return NOTIFY_DONE;

        dcb_work->dcb_app = *dcb_app;
        INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
        schedule_work(&dcb_work->work);
        return NOTIFY_OK;
}
#endif

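/*
 * cxgbit does not offload T10 protection information; always report
 * TARGET_PROT_NORMAL to the iSCSI target core.
 */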
static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
{
        return TARGET_PROT_NORMAL;
}

static struct iscsit_transport cxgbit_transport = {
        .name = DRV_NAME,
        .transport_type = ISCSI_CXGBIT,
        .rdma_shutdown = false,
        .priv_size = sizeof(struct cxgbit_cmd),
        .owner = THIS_MODULE,
        .iscsit_setup_np = cxgbit_setup_np,
        .iscsit_accept_np = cxgbit_accept_np,
        .iscsit_free_np = cxgbit_free_np,
        .iscsit_free_conn = cxgbit_free_conn,
        .iscsit_get_login_rx = cxgbit_get_login_rx,
        .iscsit_put_login_tx = cxgbit_put_login_tx,
        .iscsit_immediate_queue = iscsit_immediate_queue,
        .iscsit_response_queue = iscsit_response_queue,
        .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
        .iscsit_queue_data_in = iscsit_queue_rsp,
        .iscsit_queue_status = iscsit_queue_rsp,
        .iscsit_xmit_pdu = cxgbit_xmit_pdu,
        .iscsit_get_r2t_ttt = cxgbit_get_r2t_ttt,
        .iscsit_get_rx_pdu = cxgbit_get_rx_pdu,
        .iscsit_validate_params = cxgbit_validate_params,
        .iscsit_release_cmd = cxgbit_release_cmd,
        .iscsit_aborted_task = iscsit_aborted_task,
        .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};

static struct cxgb4_uld_info cxgbit_uld_info = {
        .name = DRV_NAME,
        .nrxq = MAX_ULD_QSETS,
        .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .lro = true,
        .add = cxgbit_uld_add,
        .state_change = cxgbit_uld_state_change,
        .lro_rx_handler = cxgbit_uld_lro_rx_handler,
        .lro_flush = cxgbit_uld_lro_flush,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static struct notifier_block cxgbit_dcbevent_nb = {
        .notifier_call = cxgbit_dcbevent_notify,
};
#endif

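/*
 * Module init: register with cxgb4 as an iSCSI target ULD and with the
 * iSCSI target core as a transport; the BUILD_BUG_ON ensures skb->cb is
 * large enough to hold cxgbit's per-skb control block.
 */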
static int __init cxgbit_init(void)
{
        cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
        iscsit_register_transport(&cxgbit_transport);

#ifdef CONFIG_CHELSIO_T4_DCB
        pr_info("%s dcb enabled.\n", DRV_NAME);
        register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
                     sizeof(union cxgbit_skb_cb));
        return 0;
}

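/*
 * Module exit: unregister the DCB notifier, release any devices still on
 * the global list and unregister the transport and ULD.
 */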
static void __exit cxgbit_exit(void)
{
        struct cxgbit_device *cdev, *tmp;

#ifdef CONFIG_CHELSIO_T4_DCB
        unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
        mutex_lock(&cdev_list_lock);
        list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
                list_del(&cdev->list);
                cxgbit_put_cdev(cdev);
        }
        mutex_unlock(&cdev_list_lock);
        iscsit_unregister_transport(&cxgbit_transport);
        cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
}

module_init(cxgbit_init);
module_exit(cxgbit_exit);

MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");