// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */

#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "prestera_dsa.h"
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_rxtx.h"
#include "prestera_devlink.h"

#define PRESTERA_SDMA_WAIT_MUL 10

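/* SDMA descriptor as seen by the device (all fields little-endian):
 * word1 carries the ownership bit (bit 31) and control flags, word2 carries
 * the buffer size in its low 16 bits and, on RX completion, the received
 * packet length starting at bit 16, buff is the device-mapped address of
 * the data buffer, and next chains descriptors into a circular ring.
 */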
struct prestera_sdma_desc {
	__le32 word1;
	__le32 word2;
	__le32 buff;
	__le32 next;
} __packed __aligned(16);

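/* Size of each preallocated RX buffer: large enough for a standard Ethernet
 * frame plus the DSA tag that the device prepends (the exact value is
 * dictated by the hardware).
 */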
#define PRESTERA_SDMA_BUFF_SIZE_MAX 1544

#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
	((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))

#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
	(PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)

#define PRESTERA_SDMA_RX_DESC_CPU_OWN 0
#define PRESTERA_SDMA_RX_DESC_DMA_OWN 1

#define PRESTERA_SDMA_RX_QUEUE_NUM 8

#define PRESTERA_SDMA_RX_DESC_PER_Q 1000

#define PRESTERA_SDMA_TX_DESC_PER_Q 1000
#define PRESTERA_SDMA_TX_MAX_BURST 64

#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_TX_DESC_CPU_OWN 0
#define PRESTERA_SDMA_TX_DESC_DMA_OWN 1U

#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
	(PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)

#define PRESTERA_SDMA_TX_DESC_LAST BIT(20)
#define PRESTERA_SDMA_TX_DESC_FIRST BIT(21)
#define PRESTERA_SDMA_TX_DESC_CALC_CRC BIT(12)

#define PRESTERA_SDMA_TX_DESC_SINGLE \
	(PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)

#define PRESTERA_SDMA_TX_DESC_INIT \
	(PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)

#define PRESTERA_SDMA_RX_INTR_MASK_REG 0x2814
#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG 0x2680
#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n) (0x260C + (n) * 16)

#define PRESTERA_SDMA_TX_QUEUE_DESC_REG 0x26C0
#define PRESTERA_SDMA_TX_QUEUE_START_REG 0x2868

struct prestera_sdma_buf {
	struct prestera_sdma_desc *desc;
	dma_addr_t desc_dma;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	bool is_used;
};

struct prestera_rx_ring {
	struct prestera_sdma_buf *bufs;
	int next_rx;
};

struct prestera_tx_ring {
	struct prestera_sdma_buf *bufs;
	int next_tx;
	int max_burst;
	int burst;
};

struct prestera_sdma {
	struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
	struct prestera_tx_ring tx_ring;
	struct prestera_switch *sw;
	struct dma_pool *desc_pool;
	struct work_struct tx_work;
	struct napi_struct rx_napi;
	struct net_device napi_dev;
	u32 map_addr;
	u64 dma_mask;
	/* protects the SDMA TX path against concurrent access from multiple CPUs */
	spinlock_t tx_lock;
};

struct prestera_rxtx {
	struct prestera_sdma sdma;
};

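/* Allocate a hardware descriptor from the DMA pool and attach it, together
 * with its DMA address, to the ring bookkeeping entry @buf.
 */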
static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
				  struct prestera_sdma_buf *buf)
{
	struct prestera_sdma_desc *desc;
	dma_addr_t dma;

	desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
	if (!desc)
		return -ENOMEM;

	buf->buf_dma = DMA_MAPPING_ERROR;
	buf->desc_dma = dma;
	buf->desc = desc;
	buf->skb = NULL;

	return 0;
}

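/* Translate a host DMA address into the device's address space by applying
 * the base offset that the firmware reported at rxtx init (map_addr).
 */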
static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
{
	return sdma->map_addr + pa;
}

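/* Arm an RX descriptor: program the buffer size and the device-visible
 * buffer address, then hand the descriptor back to the DMA engine. The
 * 0xA0000000 write sets the ownership bit (bit 31, DMA-owned) together with
 * bit 29, which appears to be the per-descriptor interrupt enable.
 */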
static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc,
				       dma_addr_t buf)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
	desc->word2 = cpu_to_le32(word);

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));

	/* make sure the buffer address is set before resetting the descriptor */
	wmb();

	desc->word1 = cpu_to_le32(0xA0000000);
}

static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

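/* Allocate a fresh receive skb and DMA-map it for the device; on success,
 * release the mapping of the previous skb (if any) and install the new one
 * in @buf.
 */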
static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
				      struct prestera_sdma_buf *buf)
{
	struct device *dev = sdma->sw->dev->dev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto err_dma_map;

	if (buf->skb)
		dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
				 DMA_FROM_DEVICE);

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;

err_dma_map:
	kfree_skb(skb);

	return -ENOMEM;
}

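/* Take the received skb out of the ring slot, replacing it with a freshly
 * allocated one. If the replacement allocation fails, the original skb stays
 * in the ring and a copy of the received data is returned instead (or NULL
 * if even the copy cannot be allocated); the descriptor is re-armed in
 * either case.
 */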
static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
						struct prestera_sdma_buf *buf)
{
	dma_addr_t buf_dma = buf->buf_dma;
	struct sk_buff *skb = buf->skb;
	u32 len = skb->len;
	int err;

	err = prestera_sdma_rx_skb_alloc(sdma, buf);
	if (err) {
		buf->buf_dma = buf_dma;
		buf->skb = skb;

		skb = alloc_skb(skb->len, GFP_ATOMIC);
		if (skb) {
			skb_put(skb, len);
			skb_copy_from_linear_data(buf->skb, skb->data, len);
		}
	}

	prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);

	return skb;
}

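/* Parse the DSA tag of a received frame, resolve the source port, strip the
 * tag (moving the MAC addresses back in front of the payload), recover VLAN
 * info from the tag, and deliver the frame through the devlink trap
 * interface.
 */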
static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
				     struct sk_buff *skb)
{
	struct prestera_port *port;
	struct prestera_dsa dsa;
	u32 hw_port, dev_id;
	u8 cpu_code;
	int err;

	skb_pull(skb, ETH_HLEN);

	/* the ethertype field is part of the DSA header */
	err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
	if (err)
		return err;

	dev_id = dsa.hw_dev_num;
	hw_port = dsa.port_num;

	port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
	if (unlikely(!port)) {
		dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n",
				     dev_id, hw_port);
		return -ENOENT;
	}

	if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
		return -EINVAL;

	/* remove the DSA tag and update the checksum */
	skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);

	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
		ETH_ALEN * 2);

	skb_push(skb, ETH_HLEN);

	skb->protocol = eth_type_trans(skb, port->dev);

	if (dsa.vlan.is_tagged) {
		u16 tci = dsa.vlan.vid & VLAN_VID_MASK;

		tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
		if (dsa.vlan.cfi_bit)
			tci |= VLAN_CFI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	}

	cpu_code = dsa.cpu_code;
	prestera_devlink_trap_report(port, skb, cpu_code);

	return 0;
}

static int prestera_sdma_next_rx_buf_idx(int buf_idx)
{
	return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
}

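/* NAPI poll handler: round-robin over all RX queues until either the budget
 * is spent or every queue reports no completed descriptor. Completed skbs
 * are batched onto rx_list and delivered in one go; once below budget, the
 * RX interrupt mask (bits 9:2) is re-enabled.
 */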
static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	unsigned int rxq_done_map = 0;
	struct prestera_sdma *sdma;
	struct list_head rx_list;
	unsigned int qmask;
	int pkts_done = 0;
	int q;

	qmask = GENMASK(qnum - 1, 0);

	INIT_LIST_HEAD(&rx_list);

	sdma = container_of(napi, struct prestera_sdma, rx_napi);

	while (pkts_done < budget && rxq_done_map != qmask) {
		for (q = 0; q < qnum && pkts_done < budget; q++) {
			struct prestera_rx_ring *ring = &sdma->rx_ring[q];
			struct prestera_sdma_desc *desc;
			struct prestera_sdma_buf *buf;
			int buf_idx = ring->next_rx;
			struct sk_buff *skb;

			buf = &ring->bufs[buf_idx];
			desc = buf->desc;

			if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
				rxq_done_map &= ~BIT(q);
			} else {
				rxq_done_map |= BIT(q);
				continue;
			}

			pkts_done++;

			__skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));

			skb = prestera_sdma_rx_skb_get(sdma, buf);
			if (!skb)
				goto rx_next_buf;

			if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
				goto rx_next_buf;

			list_add_tail(&skb->list, &rx_list);
rx_next_buf:
			ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
		}
	}

	if (pkts_done < budget && napi_complete_done(napi, pkts_done))
		prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
			       GENMASK(9, 2));

	netif_receive_skb_list(&rx_list);

	return pkts_done;
}

static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int q, b;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		if (!ring->bufs)
			break;

		for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
			struct prestera_sdma_buf *buf = &ring->bufs[b];

			if (buf->desc_dma)
				dma_pool_free(sdma->desc_pool, buf->desc,
					      buf->desc_dma);

			if (!buf->skb)
				continue;

			if (buf->buf_dma != DMA_MAPPING_ERROR)
				dma_unmap_single(sdma->sw->dev->dev,
						 buf->buf_dma, buf->skb->len,
						 DMA_FROM_DEVICE);
			kfree_skb(buf->skb);
		}
	}
}

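/* Build every RX ring: allocate the descriptor/buffer arrays, link the
 * descriptors into a circular list, prime each slot with a mapped skb,
 * program the ring base into the per-queue descriptor register, and finally
 * enable all queues. On failure the caller is expected to run
 * prestera_sdma_rx_fini() to unwind partial allocations.
 */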
static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
{
	int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int err;
	int q;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_sdma_buf *head, *tail, *next, *prev;
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
		if (!ring->bufs)
			return -ENOMEM;

		ring->next_rx = 0;

		tail = &ring->bufs[bnum - 1];
		head = &ring->bufs[0];
		next = head;
		prev = next;

		do {
			err = prestera_sdma_buf_init(sdma, next);
			if (err)
				return err;

			err = prestera_sdma_rx_skb_alloc(sdma, next);
			if (err)
				return err;

			prestera_sdma_rx_desc_init(sdma, next->desc,
						   next->buf_dma);

			prestera_sdma_rx_desc_set_next(sdma, prev->desc,
						       next->desc_dma);

			prev = next;
			next++;
		} while (prev != tail);

		/* join tail with head to make a circular list */
		prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);

		prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
			       prestera_sdma_map(sdma, head->desc_dma));
	}

	/* make sure all rx descs are filled before enabling all rx queues */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(7, 0));

	return 0;
}

static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc)
{
	desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
	desc->word2 = 0;
}

static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
					  struct prestera_sdma_desc *desc,
					  dma_addr_t buf, size_t len)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
	desc->word2 = cpu_to_le32(word);
}

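/* Hand a fully prepared TX descriptor over to the DMA engine by flipping
 * its ownership bit; the barrier ensures all prior descriptor writes are
 * visible to the device first.
 */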
static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
{
	u32 word = le32_to_cpu(desc->word1);

	word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;

	/* make sure everything is written before enabling xmit */
	wmb();

	desc->word1 = cpu_to_le32(word);
}

static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
				    struct prestera_sdma_buf *buf,
				    struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	dma_addr_t dma;

	dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, dma))
		return -ENOMEM;

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;
}

static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
				       struct prestera_sdma_buf *buf)
{
	struct device *dma_dev = sdma->sw->dev->dev;

	dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
}

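/* Deferred TX completion: walk the whole TX ring and, for every slot whose
 * descriptor has been handed back to the CPU, unmap the buffer, free the
 * skb, and mark the slot reusable for the xmit path.
 */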
static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
{
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma *sdma;
	int b;

	sdma = container_of(work, struct prestera_sdma, tx_work);

	tx_ring = &sdma->tx_ring;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &tx_ring->bufs[b];

		if (!buf->is_used)
			continue;

		if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
			continue;

		prestera_sdma_tx_buf_unmap(sdma, buf);
		dev_consume_skb_any(buf->skb);
		buf->skb = NULL;

		/* make sure everything is cleaned up */
		wmb();

		buf->is_used = false;
	}
}

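/* Build the single TX ring as a circular descriptor list and program its
 * base address into the device; data buffers are mapped lazily, per packet,
 * in the xmit path.
 */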
static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
{
	struct prestera_sdma_buf *head, *tail, *next, *prev;
	struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int err;

	INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
	spin_lock_init(&sdma->tx_lock);

	tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
	if (!tx_ring->bufs)
		return -ENOMEM;

	tail = &tx_ring->bufs[bnum - 1];
	head = &tx_ring->bufs[0];
	next = head;
	prev = next;

	tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
	tx_ring->burst = tx_ring->max_burst;
	tx_ring->next_tx = 0;

	do {
		err = prestera_sdma_buf_init(sdma, next);
		if (err)
			return err;

		next->is_used = false;

		prestera_sdma_tx_desc_init(sdma, next->desc);

		prestera_sdma_tx_desc_set_next(sdma, prev->desc,
					       next->desc_dma);

		prev = next;
		next++;
	} while (prev != tail);

	/* join tail with head to make a circular list */
	prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);

	/* make sure descriptors are written */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
		       prestera_sdma_map(sdma, head->desc_dma));

	return 0;
}

static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
{
	struct prestera_tx_ring *ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int b;

	cancel_work_sync(&sdma->tx_work);

	if (!ring->bufs)
		return;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &ring->bufs[b];

		if (buf->desc)
			dma_pool_free(sdma->desc_pool, buf->desc,
				      buf->desc_dma);

		if (!buf->skb)
			continue;

		dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
				 buf->skb->len, DMA_TO_DEVICE);

		dev_consume_skb_any(buf->skb);
	}
}

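/* Firmware event handler: on a "packet received" event, mask the RX
 * interrupts and let NAPI take over polling the queues.
 */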
static void prestera_rxtx_handle_event(struct prestera_switch *sw,
				       struct prestera_event *evt,
				       void *arg)
{
	struct prestera_sdma *sdma = arg;

	if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
		return;

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
	napi_schedule(&sdma->rx_napi);
}

static int prestera_sdma_switch_init(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;
	struct device *dev = sw->dev->dev;
	struct prestera_rxtx_params p;
	int err;

	p.use_sdma = true;

	err = prestera_hw_rxtx_init(sw, &p);
	if (err) {
		dev_err(dev, "failed to init rxtx by hw\n");
		return err;
	}

	sdma->dma_mask = dma_get_mask(dev);
	sdma->map_addr = p.map_addr;
	sdma->sw = sw;

	sdma->desc_pool = dma_pool_create("desc_pool", dev,
					  sizeof(struct prestera_sdma_desc),
					  16, 0);
	if (!sdma->desc_pool)
		return -ENOMEM;

	err = prestera_sdma_rx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init rx ring\n");
		goto err_rx_init;
	}

	err = prestera_sdma_tx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init tx ring\n");
		goto err_tx_init;
	}

	err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
						 prestera_rxtx_handle_event,
						 sdma);
	if (err)
		goto err_evt_register;

	init_dummy_netdev(&sdma->napi_dev);

	netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
	napi_enable(&sdma->rx_napi);

	return 0;

err_evt_register:
err_tx_init:
	prestera_sdma_tx_fini(sdma);
err_rx_init:
	prestera_sdma_rx_fini(sdma);

	dma_pool_destroy(sdma->desc_pool);
	return err;
}

static void prestera_sdma_switch_fini(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;

	napi_disable(&sdma->rx_napi);
	netif_napi_del(&sdma->rx_napi);
	prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
					     prestera_rxtx_handle_event);
	prestera_sdma_tx_fini(sdma);
	prestera_sdma_rx_fini(sdma);
	dma_pool_destroy(sdma->desc_pool);
}

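/* The device appears to clear bit 0 of the TX queue start register once it
 * has drained the queued descriptors, so a zero bit means the engine can be
 * kicked again. prestera_sdma_tx_wait() below busy-polls for that state,
 * bounded by PRESTERA_SDMA_WAIT_MUL * max_burst microseconds.
 */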
static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
{
	return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
}

static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
				 struct prestera_tx_ring *tx_ring)
{
	int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;

	do {
		if (prestera_sdma_is_ready(sdma))
			return 0;

		udelay(1);
	} while (--tx_wait_num);

	return -EBUSY;
}

static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
{
	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
	schedule_work(&sdma->tx_work);
}

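/* Transmit one skb through the SDMA TX ring: claim the next slot (dropping
 * the packet if completions have not caught up), pad the frame to the
 * minimum Ethernet size, map it for DMA, and hand the descriptor to the
 * device. After max_burst packets the queue must be observed idle via
 * prestera_sdma_tx_wait() before the next kick. Always returns NETDEV_TX_OK;
 * failed packets are dropped, not requeued.
 */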
static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
				      struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	struct net_device *dev = skb->dev;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma_buf *buf;
	int err;

	spin_lock(&sdma->tx_lock);

	tx_ring = &sdma->tx_ring;

	buf = &tx_ring->bufs[tx_ring->next_tx];
	if (buf->is_used) {
		schedule_work(&sdma->tx_work);
		goto drop_skb;
	}

	if (unlikely(eth_skb_pad(skb)))
		goto drop_skb_nofree;

	err = prestera_sdma_tx_buf_map(sdma, buf, skb);
	if (err)
		goto drop_skb;

	prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);

	dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
				   DMA_TO_DEVICE);

	if (tx_ring->burst) {
		tx_ring->burst--;
	} else {
		tx_ring->burst = tx_ring->max_burst;

		err = prestera_sdma_tx_wait(sdma, tx_ring);
		if (err)
			goto drop_skb_unmap;
	}

	tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
	prestera_sdma_tx_desc_xmit(buf->desc);
	buf->is_used = true;

	prestera_sdma_tx_start(sdma);

	goto tx_done;

drop_skb_unmap:
	prestera_sdma_tx_buf_unmap(sdma, buf);
drop_skb:
	dev_consume_skb_any(skb);
drop_skb_nofree:
	dev->stats.tx_dropped++;
tx_done:
	spin_unlock(&sdma->tx_lock);
	return NETDEV_TX_OK;
}

int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
	struct prestera_rxtx *rxtx;
	int err;

	rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
	if (!rxtx)
		return -ENOMEM;

	sw->rxtx = rxtx;

	err = prestera_sdma_switch_init(sw);
	if (err)
		kfree(rxtx);

	return err;
}

void prestera_rxtx_switch_fini(struct prestera_switch *sw)
{
	prestera_sdma_switch_fini(sw);
	kfree(sw->rxtx);
}

int prestera_rxtx_port_init(struct prestera_port *port)
{
	port->dev->needed_headroom = PRESTERA_DSA_HLEN;
	return 0;
}

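/* Entry point from the port netdev: make room in the headroom, insert the
 * DSA tag between the MAC addresses and the original ethertype, then hand
 * the frame to the SDMA engine.
 */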
netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
{
	struct prestera_dsa dsa;

	dsa.hw_dev_num = port->dev_id;
	dsa.port_num = port->hw_id;

	if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
		return NET_XMIT_DROP;

	skb_push(skb, PRESTERA_DSA_HLEN);
	memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);

	if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
		return NET_XMIT_DROP;

	return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
}