/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

/**
 *hns_rcb_wait_fbd_clean - wait until the queues' fbd are cleaned by hardware
 *@qs: ring struct pointer array
 *@q_num: num of array
 *@flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num;

	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);
		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}

/**
 *hns_rcb_reset_ring_hw - ring reset
 *@q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;
	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}

/**
 *hns_rcb_int_ctrl_hw - rcb irq enable control
 *@q: hnae queue struct pointer
 *@flag: ring flag tx or rx
 *@mask: mask
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}

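/**
 *hns_rcb_int_clr_hw - clear rcb ring interrupt status
 *@q: hnae queue struct pointer
 *@flag: ring flag tx or rx
 */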
void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}

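/**
 *hns_rcbv2_int_ctrl_hw - rcb v2 irq enable control
 *@q: hnae queue struct pointer
 *@flag: ring flag tx or rx
 *@mask: mask
 */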
void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

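/**
 *hns_rcbv2_int_clr_hw - clear rcb v2 ring interrupt status
 *@q: hnae queue struct pointer
 *@flag: ring flag tx or rx
 */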
void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 *hns_rcb_ring_enable_hw - enable ring
 *@q: rcb ring queue
 *@val: enable value
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

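/**
 *hns_rcb_start - enable the ring prefetch to start the queue
 *@q: hnae queue struct pointer
 *@val: enable value
 */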
void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 *hns_rcb_common_init_commit_hw - make rcb common init completed
 *@rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb(); /* Sync point before breakpoint */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb(); /* Sync point after breakpoint */
}

/**
 *hns_rcb_ring_init - init rcb ring
 *@ring_pair: ring pair control block
 *@ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	}
}

/**
 *hns_rcb_init_hw - init rcb hardware
 *@ring: rcb ring
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 *hns_rcb_set_port_desc_cnt - set rcb port description num
 *@rcb_common: rcb_common device
 *@port_idx: port index
 *@desc_cnt: BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

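/**
 *hns_rcb_set_port_timeout - set rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx: port index
 *@timeout: coalesced time_out in usecs
 */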
static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	else
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
}

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
	else
		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

/* clear rcb comm exception irq */
static void hns_rcb_comm_exc_irq_en(
	struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_vlue = 0xfffffffful;
	u32 msk_vlue = en ? 0 : 0xfffffffful;

	/* clear int */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);

	/* enable mask */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);

	/* tx bd does not need the cacheline,
	 * so mask sf_txring_fbd_intmask (bit 1)
	 */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
}

/**
 *hns_rcb_common_init_hw - init rcb common hardware
 *@rcb_common: rcb_common device
 *return 0 - success, negative -- fail
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (!(reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		(void)hns_rcb_set_coalesced_frames(
			rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
		hns_rcb_set_port_timeout(
			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
	}

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}

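/**
 *hns_rcb_buf_size2type - convert rx buffer size to BD size type
 *@buf_size: buffer size in bytes
 *return the matching HNS_BD_SIZE_*_TYPE, or -EINVAL for unsupported sizes
 */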
int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}

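/**
 *hns_rcb_ring_get_cfg - fill default software config for one ring of a queue
 *@q: hnae queue struct pointer
 *@ring_type: ring type, RX_RING or TX_RING
 */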
static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u32 buf_size;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				 HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	buf_size = rcb_common->dsaf_dev->buf_size;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = buf_size;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

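/**
 *hns_rcb_get_port_in_comm - get port index in rcb common that a ring maps to
 *@rcb_common: rcb common device
 *@ring_idx: ring index in rcb common
 */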
static int hns_rcb_get_port_in_comm(
	struct rcb_common_cb *rcb_common, int ring_idx)
{
	return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}

#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return SERVICE_RING_IRQ_IDX(is_ver1);
	else
		return HNS_DEBUG_RING_IRQ_IDX;
}

#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
/**
 *hns_rcb_get_cfg - get rcb config
 *@rcb_common: rcb common device
 */
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_comm =
			hns_rcb_get_port_in_comm(rcb_common, i);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
		  is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
			  platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
		  is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
			  platform_get_irq(pdev, base_irq_idx + i * 3);
		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}
}

/**
 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *
 *Returns: coalesced_frames
 */
u32 hns_rcb_get_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *
 *Returns: time_out
 */
u32 hns_rcb_get_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
		       HNS_RCB_CLK_FREQ_MHZ;
	else
		return dsaf_read_dev(rcb_common,
				     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}

/**
 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *@timeout: tx/rx time for coalesced time_out
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

	if (timeout == old_timeout)
		return 0;

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
			dev_err(rcb_common->dsaf_dev->dev,
				"error: not support coalesce_usecs setting!\n");
			return -EINVAL;
		}
	}
	if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce_usecs setting supports 0~1023us\n");
		return -EINVAL;
	}

	if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (timeout == 0)
			/* set timeout to 0, Disable gap time */
			dsaf_set_reg_field(rcb_common->io_base,
					   RCB_INT_GAP_TIME_REG + port_idx * 4,
					   PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
					   0);
		else
			/* set timeout non 0, restore gap time to 1 */
			dsaf_set_reg_field(rcb_common->io_base,
					   RCB_INT_GAP_TIME_REG + port_idx * 4,
					   PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
					   1);
	}

	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
	return 0;
}

/**
 *hns_rcb_set_coalesced_frames - set rcb coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx: port id in comm
 *@coalesced_frames: tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support coalesce_frames setting!\n");
		return -EINVAL;
	}

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}

/**
 *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 * according to dsaf mode
 *@dsaf_mode: dsaf mode
 *@max_vfn: max vfn number
 *@max_q_per_vf: max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
			    u16 *max_q_per_vf)
{
	switch (dsaf_mode) {
	case DSAF_MODE_DISABLE_6PORT_0VM:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	case DSAF_MODE_DISABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		*max_vfn = 1;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_2PORT_64VM:
		*max_vfn = 64;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_6PORT_16VM:
		*max_vfn = 16;
		*max_q_per_vf = 1;
		break;
	default:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	}
}

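/**
 *hns_rcb_get_ring_num - get the total ring number according to dsaf mode
 *@dsaf_dev: dsaf device
 *return ring number
 */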
int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
	switch (dsaf_dev->dsaf_mode) {
	case DSAF_MODE_ENABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		return 1;

	case DSAF_MODE_DISABLE_FIX:
		return 6;

	case DSAF_MODE_ENABLE_0VM:
		return 32;

	case DSAF_MODE_DISABLE_6PORT_0VM:
	case DSAF_MODE_ENABLE_16VM:
	case DSAF_MODE_DISABLE_6PORT_2VM:
	case DSAF_MODE_DISABLE_6PORT_16VM:
	case DSAF_MODE_DISABLE_6PORT_4VM:
	case DSAF_MODE_ENABLE_8VM:
		return 96;

	case DSAF_MODE_DISABLE_2PORT_16VM:
	case DSAF_MODE_DISABLE_2PORT_8VM:
	case DSAF_MODE_ENABLE_32VM:
	case DSAF_MODE_DISABLE_2PORT_64VM:
	case DSAF_MODE_ENABLE_128VM:
		return 128;

	default:
		dev_warn(dsaf_dev->dev,
			 "get ring num fail, use default! dsaf_mode=%d\n",
			 dsaf_dev->dsaf_mode);
		return 128;
	}
}

void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}

static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}

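/**
 *hns_rcb_common_get_cfg - allocate and fill an rcb common control block
 *@dsaf_dev: dsaf device
 *@comm_index: rcb common index
 *return 0 - success, negative -- fail
 */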
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}

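/**
 *hns_rcb_update_stats - read and accumulate rcb/ppe hardware packet counters
 *@queue: hnae queue struct pointer
 */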
void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}

/**
 *hns_rcb_get_stats - get rcb statistic
 *@queue: rcb ring queue
 *@data: statistic value
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 *hns_rcb_get_ring_sset_count - rcb string set count
 *@stringset: ethtool cmd
 *return rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 *hns_rcb_get_common_regs_count - rcb common regs count
 *return regs count
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_ring_regs_count - rcb ring regs count
 *return regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 *hns_rcb_get_strings - get rcb string set
 *@stringset: string set index
 *@data: strings name value
 *@index: queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}

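/**
 *hns_rcb_get_common_regs - dump rcb common registers
 *@rcb_com: rcb common device
 *@data: register value buffer
 */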
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
	bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
	u32 reg_tmp;
	u32 reg_num_tmp;
	u32 i = 0;

	/* rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* total 16 model registers */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
	for (i = 0; i < reg_num_tmp; i++)
		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);

	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 78; i < 80; i++)
		regs[i] = 0xcccccccc;
}

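/**
 *hns_rcb_get_ring_regs - dump per-ring rcb registers
 *@queue: hnae queue struct pointer
 *@data: register value buffer
 */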
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/* rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}