/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef COMMON_H
#define COMMON_H

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/log2.h>
#include <linux/spinlock.h>

#include "rvu_struct.h"

#define OTX2_ALIGN			128  /* Align to cacheline */

#define Q_SIZE_16		0ULL /* 16 entries */
#define Q_SIZE_64		1ULL /* 64 entries */
#define Q_SIZE_256		2ULL
#define Q_SIZE_1K		3ULL
#define Q_SIZE_4K		4ULL
#define Q_SIZE_16K		5ULL
#define Q_SIZE_64K		6ULL
#define Q_SIZE_256K		7ULL
#define Q_SIZE_1M		8ULL /* Million entries */
#define Q_SIZE_MIN		Q_SIZE_16
#define Q_SIZE_MAX		Q_SIZE_1M
#define Q_COUNT(x)		(16ULL << (2 * (x)))
#define Q_SIZE(x, n)		((ilog2(x) - (n)) / 2)
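/* Worked example of the size-code round trip: Q_SIZE_256 is code 2,
 * so Q_COUNT(Q_SIZE_256) == 16ULL << 4 == 256; conversely, taking n
 * as ilog2 of the minimum entry count (ilog2(16) == 4),
 * Q_SIZE(256, 4) == (ilog2(256) - 4) / 2 == 2 == Q_SIZE_256.
 */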

/* Admin queue info */

/* Since we intend to add only one instruction at a time,
 * keep the queue size at its minimum.
 */
#define AQ_SIZE			Q_SIZE_16
/* HW head & tail pointer mask */
#define AQ_PTR_MASK		0xFFFFF

struct qmem {
	void		*base;
	dma_addr_t	iova;
	int		alloc_sz;
	u16		entry_sz;
	u8		align;
	u32		qsize;
};

static inline int qmem_alloc(struct device *dev, struct qmem **q,
			     int qsize, int entry_sz)
{
	struct qmem *qmem;
	u64 aligned_addr;

	if (!qsize)
		return -EINVAL;

	*q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
	if (!*q)
		return -ENOMEM;
	qmem = *q;

	qmem->entry_sz = entry_sz;
	/* Over-allocate so the buffer can be aligned to OTX2_ALIGN */
	qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
	qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
					&qmem->iova, GFP_KERNEL);
	if (!qmem->base)
		return -ENOMEM;

	qmem->qsize = qsize;

	/* Advance base & iova to the next cacheline boundary and
	 * remember the offset so it can be undone at free time.
	 */
	aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
	qmem->align = (aligned_addr - qmem->iova);
	qmem->base += qmem->align;
	qmem->iova += qmem->align;
	return 0;
}

static inline void qmem_free(struct device *dev, struct qmem *qmem)
{
	if (!qmem)
		return;

	if (qmem->base)
		dma_free_coherent(dev, qmem->alloc_sz,
				  qmem->base - qmem->align,
				  qmem->iova - qmem->align);
	devm_kfree(dev, qmem);
}
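
/* Usage sketch (illustrative only, not driver code): allocate
 * DMA-coherent backing for a 16-entry admin queue with 128-byte
 * entries, then release it. 'dev' is assumed to be the underlying
 * PCI device's struct device (e.g. &pdev->dev), and 128 is just an
 * example entry size.
 *
 *	struct qmem *aq_inst;
 *	int err;
 *
 *	err = qmem_alloc(dev, &aq_inst, Q_COUNT(AQ_SIZE), 128);
 *	if (err)
 *		return err;
 *	(program HW with aq_inst->iova; CPU writes entries via
 *	 aq_inst->base)
 *	qmem_free(dev, aq_inst);
 */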

struct admin_queue {
	struct qmem	*inst;
	struct qmem	*res;
	spinlock_t	lock; /* Serialize inst enqueue from PFs */
};
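
/* Enqueue sketch (illustrative; the 'head' bookkeeping and doorbell
 * write are assumptions, not part of this header): the spinlock is
 * what serializes back-to-back submissions from multiple PFs.
 *
 *	spin_lock(&aq->lock);
 *	memcpy(aq->inst->base + (head * aq->inst->entry_sz),
 *	       &inst, aq->inst->entry_sz);
 *	(ring the AQ doorbell so HW picks up the new instruction)
 *	spin_unlock(&aq->lock);
 */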

/* NPA aura count */
enum npa_aura_sz {
	NPA_AURA_SZ_0,
	NPA_AURA_SZ_128,
	NPA_AURA_SZ_256,
	NPA_AURA_SZ_512,
	NPA_AURA_SZ_1K,
	NPA_AURA_SZ_2K,
	NPA_AURA_SZ_4K,
	NPA_AURA_SZ_8K,
	NPA_AURA_SZ_16K,
	NPA_AURA_SZ_32K,
	NPA_AURA_SZ_64K,
	NPA_AURA_SZ_128K,
	NPA_AURA_SZ_256K,
	NPA_AURA_SZ_512K,
	NPA_AURA_SZ_1M,
	NPA_AURA_SZ_MAX,
};

#define NPA_AURA_COUNT(x)	(1ULL << ((x) + 6))
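/* Example: NPA_AURA_SZ_128 is code 1, so
 * NPA_AURA_COUNT(NPA_AURA_SZ_128) == 1ULL << (1 + 6) == 128; each
 * step up in npa_aura_sz doubles the count, up to 1M at
 * NPA_AURA_SZ_1M.
 */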

/* NPA AQ result structure for init/read/write of aura HW contexts */
struct npa_aq_aura_res {
	struct npa_aq_res_s	res;
	struct npa_aura_s	aura_ctx;
	struct npa_aura_s	ctx_mask;
};

/* NPA AQ result structure for init/read/write of pool HW contexts */
struct npa_aq_pool_res {
	struct npa_aq_res_s	res;
	struct npa_pool_s	pool_ctx;
	struct npa_pool_s	ctx_mask;
};

/* NIX Transmit schedulers */
enum nix_scheduler {
	NIX_TXSCH_LVL_SMQ = 0x0,
	NIX_TXSCH_LVL_MDQ = 0x0,
	NIX_TXSCH_LVL_TL4 = 0x1,
	NIX_TXSCH_LVL_TL3 = 0x2,
	NIX_TXSCH_LVL_TL2 = 0x3,
	NIX_TXSCH_LVL_TL1 = 0x4,
	NIX_TXSCH_LVL_CNT = 0x5,
};

#define TXSCH_RR_QTM_MAX		((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM		TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO		(0x1ull)
#define MAX_SCHED_WEIGHT		0xFF
#define DFLT_RR_WEIGHT			71
#define DFLT_RR_QTM			((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
					 / MAX_SCHED_WEIGHT)
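/* Worked out: DFLT_RR_QTM == (71 * 16777215) / 255 == 4671303, i.e. a
 * relative weight of 71/255 (~27.8%) of the maximum RR quantum.
 */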

/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS			40
#define NIC_HW_MAX_FRS			9212
#define SDP_HW_MAX_FRS			65535

/* NIX RX action operation */
#define NIX_RX_ACTIONOP_DROP		(0x0ull)
#define NIX_RX_ACTIONOP_UCAST		(0x1ull)
#define NIX_RX_ACTIONOP_UCAST_IPSEC	(0x2ull)
#define NIX_RX_ACTIONOP_MCAST		(0x3ull)
#define NIX_RX_ACTIONOP_RSS		(0x4ull)

/* NIX TX action operation */
#define NIX_TX_ACTIONOP_DROP		(0x0ull)
#define NIX_TX_ACTIONOP_UCAST_DEFAULT	(0x1ull)
#define NIX_TX_ACTIONOP_UCAST_CHAN	(0x2ull)
#define NIX_TX_ACTIONOP_MCAST		(0x3ull)
#define NIX_TX_ACTIONOP_DROP_VIOL	(0x5ull)

#define NPC_MCAM_KEY_X1			0
#define NPC_MCAM_KEY_X2			1
#define NPC_MCAM_KEY_X4			2

#define NIX_INTF_RX			0
#define NIX_INTF_TX			1

#define NIX_INTF_TYPE_CGX		0
#define NIX_INTF_TYPE_LBK		1

#define MAX_LMAC_PKIND			12
#define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a)			(12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b)		(0 + 0x100 * (a) + (b))
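/* Example, assuming the 3 CGX x 4 LMAC layout that MAX_LMAC_PKIND == 12
 * implies: CGX1/LMAC2 is link NIX_LINK_CGX_LMAC(1, 2) == 6, and its
 * channel 3 is NIX_CHAN_CGX_LMAC_CHX(1, 2, 3)
 * == 0x800 + 0x100 + 0x20 + 0x3 == 0x923. LBK links start right after
 * the CGX LMAC links, at NIX_LINK_LBK(0) == 12.
 */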

/* NIX LSO format indices.
 * As of now TSO is the only user, so indices are assigned statically.
 */
#define NIX_LSO_FORMAT_IDX_TSOV4	0
#define NIX_LSO_FORMAT_IDX_TSOV6	1

/* RSS info */
#define MAX_RSS_GROUPS			8
/* Group 0 has to be used in default pkt forwarding MCAM entries
 * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
 * filters.
 */
#define DEFAULT_RSS_CONTEXT_GROUP	0
#define MAX_RSS_INDIR_TBL_SIZE		256 /* 1 << Max addr bits */

/* NDC info */
enum ndc_idx_e {
	NIX0_RX = 0x0,
	NIX0_TX = 0x1,
	NPA0_U = 0x2,
};

enum ndc_ctype_e {
	CACHING = 0x0,
	BYPASS = 0x1,
};

#define NDC_MAX_PORT		6
#define NDC_READ_TRANS		0
#define NDC_WRITE_TRANS		1

#endif /* COMMON_H */