1 /*
2 * Copyright (c) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "ctrl.h"
17 #include "eth_drv.h"
18 #include "hieth_pri.h"
19 #include "hdf_netbuf.h"
20 #include "mdio.h"
21 #include <linux/delay.h>
22 #include <netinet/ip.h>
23 #include <netinet/in.h>
24 #include <netinet/tcp.h>
25 #include <netinet/if_ether.h>
26
/* Read an expanded (indirect) FEPHY register: latch the register number
 * through MII_EXPMA, then fetch the value from MII_EXPMD. */
static inline int32_t FephyExpandedRead(struct HiethNetdevLocal *ld, int32_t phyAddr, int32_t regNum)
{
    int32_t value;

    HiethMdioWrite(ld, phyAddr, MII_EXPMA, regNum);
    value = HiethMdioRead(ld, phyAddr, MII_EXPMD);
    return value;
}
32
/* Write an expanded (indirect) FEPHY register: latch the register number
 * through MII_EXPMA, then store the value via MII_EXPMD. */
static inline int32_t FephyExpandedWrite(struct HiethNetdevLocal *ld, int32_t phyAddr, int32_t regNum, int32_t val)
{
    int32_t ret;

    HiethMdioWrite(ld, phyAddr, MII_EXPMA, regNum);
    ret = HiethMdioWrite(ld, phyAddr, MII_EXPMD, val);
    return ret;
}
38
HiethFephyUseDefaultTrim(struct HiethNetdevLocal * ld,const EthPhyAccess * phyAccess)39 static void HiethFephyUseDefaultTrim(struct HiethNetdevLocal *ld, const EthPhyAccess *phyAccess)
40 {
41 uint16_t val;
42 int32_t timeout = 3;
43
44 do {
45 msleep(250);
46 val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_DEF_ATE);
47 val &= BIT_AUTOTRIM_DONE; /* (0x1 << 0) */
48 } while (!val && --timeout);
49
50 if (!timeout) {
51 HDF_LOGE("festa PHY wait autotrim done timeout!");
52 }
53 mdelay(5);
54 }
55
/*
 * Load analog trim values into the festa FEPHY.
 *
 * Reads the LD amplitude, LDO level and R-tuning trim fields from the
 * system-controller register at SYS_CTRL_REG_BASE + 0x8024 (presumably
 * latched from OTP/efuse at boot — TODO confirm against the SoC manual).
 * If all three fields are zero there is no per-chip trim data, so fall
 * back to the PHY's own auto-trim. Otherwise merge each field into the
 * matching expanded PHY register by read-modify-write, raise CFG_DONE,
 * and poll for the PHY's CFG_ACK.
 */
void HiethFephyTrim(struct HiethNetdevLocal *ld, const EthPhyAccess *phyAccess)
{
    uint32_t val;
    int32_t timeout = 50; /* poll CFG_ACK up to 50 times, 5 ms apart */
    uint8_t ldSet, ldoSet, tuning;

    val = readl(SYS_CTRL_REG_BASE + 0x8024);
    ldSet = (val >> BIT_OFFSET_LD_SET) & BIT_MASK_LD_SET;
    ldoSet = (val >> BIT_OFFSET_LDO_SET) & BIT_MASK_LDO_SET;
    tuning = (val >> BIT_OFFSET_R_TUNING) & BIT_MASK_R_TUNING;
    if ((!ldSet) && (!ldoSet) && (!tuning)) {
        /* No trim data available: rely on the PHY's built-in auto-trim. */
        HiethFephyUseDefaultTrim(ld, phyAccess);
        return;
    }
    /* Merge each trim field into its expanded register, preserving other bits. */
    val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_LD_AM);
    val = (val & ~BIT_MASK_LD_SET) | (ldSet & BIT_MASK_LD_SET);
    FephyExpandedWrite(ld, phyAccess->phyAddr, REG_LD_AM, val);

    val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_LDO_AM);
    val = (val & ~BIT_MASK_LDO_SET) | (ldoSet & BIT_MASK_LDO_SET);
    FephyExpandedWrite(ld, phyAccess->phyAddr, REG_LDO_AM, val);

    val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_R_TUNING);
    val = (val & ~BIT_MASK_R_TUNING) | (tuning & BIT_MASK_R_TUNING);
    FephyExpandedWrite(ld, phyAccess->phyAddr, REG_R_TUNING, val);

    /* CFG_ACK is expected to be clear before signalling completion. */
    val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_WR_DONE);
    if (val & BIT_CFG_ACK) {
        HDF_LOGE("festa PHY 0x3053 bit CFG_ACK value: 1");
    }
    val = val | BIT_CFG_DONE;

    /* Tell the PHY the new trim values are in place. */
    FephyExpandedWrite(ld, phyAccess->phyAddr, REG_WR_DONE, val);

    /* Wait for the PHY to acknowledge the new configuration. */
    do {
        msleep(5);
        val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_WR_DONE);
        val &= BIT_CFG_ACK;
    } while (!val && --timeout);

    if (!timeout) {
        HDF_LOGE("festa PHY 0x3053 wait bit CFG_ACK timeout!\n");
    }

    mdelay(5);
}
102
/*
 * Configure which RX checksum-offload failures make the MAC drop a frame:
 * IP-header checksum errors and IPv6 UDP zero-checksum packets follow
 * 'drop', while payload checksum errors are never dropped in hardware
 * (left for software to decide).
 */
static inline void HiethEnableRxcsumDrop(struct HiethNetdevLocal *ld, bool drop)
{
    HiethWritelBits(ld, drop, UD_REG_NAME(GLB_RX_COE_CTRL), BITS_COE_IPHDR_DROP);
    HiethWritelBits(ld, false, UD_REG_NAME(GLB_RX_COE_CTRL), BITS_COE_PAYLOAD_DROP);
    HiethWritelBits(ld, drop, UD_REG_NAME(GLB_RX_COE_CTRL), BITS_COE_IPV6_UDP_ZERO_DROP);
}
109
/*
 * One-time MAC core initialization: create the TX/RX spinlocks, enable
 * RX checksum-based frame dropping when compiled in, reset the software
 * TX/scatter-gather queue indices, and program the hardware queue depth.
 */
void HiethHwMacCoreInit(struct HiethNetdevLocal *ld)
{
    OsalSpinInit(&(ld->tx_lock));
    OsalSpinInit(&(ld->rx_lock));

#ifdef HIETH_RXCSUM_SUPPORTED
    HiethEnableRxcsumDrop(ld, true);
#endif

#ifdef HIETH_TSO_SUPPORTED
    /* Reset scatter-gather and TX descriptor ring indices. */
    ld->sgHead = ld->sgTail = 0;
    ld->txqHead = ld->txqTail = 0;
#endif
    ld->txHwCnt = 0; /* no packets currently owned by hardware */

    /* setup hardware */
    HiethSetHwqDepth(ld);
}
128
HiethHwExternalPhyReset(void)129 void HiethHwExternalPhyReset(void)
130 {
131 uint32_t val;
132
133 READ_UINT32(val, HIETH_CRG_IOBASE);
134 val |= ETH_PHY_RESET;
135 WRITE_UINT32(val, HIETH_CRG_IOBASE);
136
137 LOS_Msleep(20);
138
139 READ_UINT32(val, HIETH_CRG_IOBASE);
140 val &= ~ETH_PHY_RESET;
141 WRITE_UINT32(val, HIETH_CRG_IOBASE);
142
143 LOS_Msleep(30);
144 }
145
/* Set the requested bits in the interrupt-enable register.
 * Returns the enable mask as read back after the update.
 * Caller must hold hiethGlbRegLock (see HiethIrqEnable). */
static inline int32_t IrqEnable(struct HiethNetdevLocal *ld, int32_t irqs)
{
    unsigned long ena = HiethRead(ld, GLB_RW_IRQ_ENA);

    HiethWrite(ld, ena | (unsigned long)irqs, GLB_RW_IRQ_ENA);
    ena = HiethRead(ld, GLB_RW_IRQ_ENA);
    return ena;
}
155
/* Clear the requested bits in the interrupt-enable register.
 * Returns the enable mask as it was before the update.
 * Caller must hold hiethGlbRegLock (see HiethIrqDisable). */
static inline int32_t IrqDisable(struct HiethNetdevLocal *ld, int32_t irqs)
{
    unsigned long prev = HiethRead(ld, GLB_RW_IRQ_ENA);

    HiethWrite(ld, prev & (~(unsigned long)irqs), GLB_RW_IRQ_ENA);
    return prev;
}
164
ReadIrqstatus(struct HiethNetdevLocal * ld)165 static inline int32_t ReadIrqstatus(struct HiethNetdevLocal *ld)
166 {
167 int32_t status;
168
169 status = HiethRead(ld, GLB_RO_IRQ_STAT);
170 return status;
171 }
172
/*
 * Program the station MAC address into the port's host-MAC registers.
 * mac[0..1] go into the high-16 register and mac[2..5] into the low-32
 * register (network byte order). For the down port the host-MAC enable
 * bit is set first. 'ena' is accepted for interface compatibility but
 * not used here. Always returns HDF_SUCCESS.
 */
int32_t HiethHwSetMacAddress(struct HiethNetdevLocal *ld, int32_t ena, const uint8_t *mac)
{
    unsigned long high16;
    unsigned long low32;

    if (ld->port == DOWN_PORT) {
        HiethWritelBits(ld, 1, GLB_DN_HOSTMAC_ENA, BITS_DN_HOST_ENA);
    }

    high16 = mac[1] | (mac[0] << 8);
    low32 = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);

    if (ld->port == UP_PORT) {
        HiethWrite(ld, high16, GLB_HOSTMAC_H16);
        HiethWrite(ld, low32, GLB_HOSTMAC_L32);
    } else {
        HiethWrite(ld, high16, GLB_DN_HOSTMAC_H16);
        HiethWrite(ld, low32, GLB_DN_HOSTMAC_L32);
    }
    return HDF_SUCCESS;
}
198
HiethHwGetMacAddress(struct HiethNetdevLocal * ld,uint8_t * mac)199 int32_t HiethHwGetMacAddress(struct HiethNetdevLocal *ld, uint8_t *mac)
200 {
201 unsigned long reg;
202
203 if (ld->port == UP_PORT) {
204 reg = HiethRead(ld, GLB_HOSTMAC_H16);
205 }
206 else {
207 reg = HiethRead(ld, GLB_DN_HOSTMAC_H16);
208 }
209 mac[0] = (reg >> 8) & 0xff;
210 mac[1] = reg & 0xff;
211
212 if (ld->port == UP_PORT) {
213 reg = HiethRead(ld, GLB_HOSTMAC_L32);
214 }
215 else {
216 reg = HiethRead(ld, GLB_DN_HOSTMAC_L32);
217 }
218 mac[2] = (reg >> 24) & 0xff;
219 mac[3] = (reg >> 16) & 0xff;
220 mac[4] = (reg >> 8) & 0xff;
221 mac[5] = reg & 0xff;
222 return HDF_SUCCESS;
223 }
224
TestXmitQueueReady(struct HiethNetdevLocal * ld)225 int32_t TestXmitQueueReady(struct HiethNetdevLocal *ld)
226 {
227 return HiethReadlBits(ld, UD_REG_NAME(GLB_RO_QUEUE_STAT), BITS_XMITQ_RDY);
228 }
229
/* Enable 'irqs' under the global register lock (IRQ-safe).
 * Returns the enable mask read back after the update. */
int32_t HiethIrqEnable(struct HiethNetdevLocal *ld, int32_t irqs)
{
    int32_t mask;

    OsalSpinLockIrq(&hiethGlbRegLock);
    mask = IrqEnable(ld, irqs);
    OsalSpinUnlockIrq(&hiethGlbRegLock);

    return mask;
}
239
/* Disable 'irqs' under the global register lock (IRQ-safe).
 * Returns the enable mask as it was before the update. */
int32_t HiethIrqDisable(struct HiethNetdevLocal *ld, int32_t irqs)
{
    int32_t mask;

    OsalSpinLockIrq(&hiethGlbRegLock);
    mask = IrqDisable(ld, irqs);
    OsalSpinUnlockIrq(&hiethGlbRegLock);

    return mask;
}
249
/* Return the current interrupt status; performed without taking
 * hiethGlbRegLock (single register read of a status register). */
int32_t HiethReadIrqstatus(struct HiethNetdevLocal *ld)
{
    return ReadIrqstatus(ld);
}
254
/* Acknowledge 'irqs' by writing the raw interrupt register, then return
 * the resulting interrupt status. Serialized by the global register lock. */
int32_t HiethClearIrqstatus(struct HiethNetdevLocal *ld, int32_t irqs)
{
    int32_t remaining;

    OsalSpinLockIrq(&hiethGlbRegLock);
    HiethWrite(ld, irqs, GLB_RW_IRQ_RAW);
    remaining = ReadIrqstatus(ld);
    OsalSpinUnlockIrq(&hiethGlbRegLock);

    return remaining;
}
265
/* Program the endian mode bits and return the previous setting. */
int32_t HiethSetEndianMode(struct HiethNetdevLocal *ld, int32_t mode)
{
    int32_t prev = HiethReadlBits(ld, GLB_ENDIAN_MOD, BITS_ENDIAN);

    HiethWritelBits(ld, mode, GLB_ENDIAN_MOD, BITS_ENDIAN);
    return prev;
}
274
HiethSetHwqDepth(struct HiethNetdevLocal * ld)275 int32_t HiethSetHwqDepth(struct HiethNetdevLocal *ld)
276 {
277 HiethAssert(ld->depth.hwXmitq > 0 && ld->depth.hwXmitq <= HIETH_MAX_QUEUE_DEPTH);
278 if ((ld->depth.hwXmitq) > HIETH_MAX_QUEUE_DEPTH) {
279 BUG();
280 return HDF_FAILURE;
281 }
282 HiethWritelBits(ld, ld->depth.hwXmitq, UD_REG_NAME(GLB_QLEN_SET), BITS_TXQ_DEP);
283 HiethWritelBits(ld, HIETH_MAX_QUEUE_DEPTH - ld->depth.hwXmitq, UD_REG_NAME(GLB_QLEN_SET), BITS_RXQ_DEP);
284 return HDF_SUCCESS;
285 }
286
/*
 * Reclaim transmit buffers that the hardware has finished sending.
 *
 * Under the TX lock, walks the software TX ring from txqTail while the
 * hardware's in-use count is below our bookkeeping count (txHwCnt),
 * freeing each completed net buffer and clearing its slot. If anything
 * was reclaimed while the TX path was marked busy, wakes the blocked
 * sender through the per-port event. Always returns 0.
 */
int32_t HiethXmitReleasePkt(struct HiethNetdevLocal *ld, const HiethPriv *priv)
{
    int32_t ret = 0;
    struct TxPktInfo *txqCur = NULL;
    int32_t txReclaimCnt = 0;
    struct PbufInfo *pbuf = NULL;

    OsalSpinLockIrq(&(ld->tx_lock));

    /* (txHwCnt - HwXmitqCntInUse) packets have completed since last call. */
    while (HwXmitqCntInUse(ld) < ld->txHwCnt) {
        HiethAssert(ld->txHwCnt);

        txqCur = ld->txq + ld->txqTail;
        if (txqCur->txAddr == 0) {
            /* Slot should hold a DMA address; log but keep reclaiming. */
            HDF_LOGE("%s: txAddr is invalid.", __func__);
        }
        pbuf = priv->ram->pbufInfo + ld->txqTail;
        if (pbuf->sgLen != 1) {
            /* Only single-segment buffers are ever queued by this driver. */
            HDF_LOGE("%s: pbuf info sg len is not 1.", __func__);
        }
        pbuf->dmaInfo[0] = NULL;
        NetBufFree(pbuf->buf);

        txqCur->txAddr = 0;

        /* Advance the ring tail with wrap-around. */
        ld->txqTail++;
        if (ld->txqTail == ld->qSize) {
            ld->txqTail = 0;
        }

        txReclaimCnt++;
        ld->txHwCnt--;
    }

    /* Wake a sender that blocked because the ring was full. */
    if (txReclaimCnt && ld->txBusy) {
        ld->txBusy = 0;
        struct HiethPlatformData *hiethPlatformData = GetHiethPlatformData();
        LOS_EventWrite(&(hiethPlatformData[priv->index].stEvent), EVENT_NET_CAN_SEND);
    }

    OsalSpinUnlockIrq(&(ld->tx_lock));
    return ret;
}
330
HiethXmitGso(struct HiethNetdevLocal * ld,const HiethPriv * priv,NetBuf * netBuf)331 int32_t HiethXmitGso(struct HiethNetdevLocal *ld, const HiethPriv *priv, NetBuf *netBuf)
332 {
333 struct TxPktInfo *txqCur = NULL;
334 int32_t sendPktLen, sgLen;
335 struct PbufInfo *pbInfo = NULL;
336
337 if (netBuf == NULL) {
338 HDF_LOGE("%sL netBuf is NULL", __func__);
339 return HDF_FAILURE;
340 }
341
342 sendPktLen = NetBufGetDataLen(netBuf);
343 if (sendPktLen > HIETH_MAX_FRAME_SIZE) {
344 HDF_LOGE("%s: xmit error len=%d", __func__, sendPktLen);
345 }
346
347 pbInfo = &(priv->ram->pbufInfo[ld->txqHead]);
348 sgLen = 0;
349 pbInfo->dmaInfo[sgLen] = (void *)NetBufGetAddress(netBuf, E_DATA_BUF);
350 sgLen++;
351 pbInfo->sgLen = sgLen;
352 pbInfo->buf = netBuf;
353
354 txqCur = ld->txq + ld->txqHead;
355 txqCur->tx.val = 0;
356
357 /* default config, default closed checksum offload function */
358 txqCur->tx.info.tsoFlag = HIETH_CSUM_DISABLE;
359 txqCur->tx.info.coeFlag = HIETH_CSUM_DISABLE;
360 if (sgLen == 1) {
361 txqCur->tx.info.sgFlag = 0;
362 NetDmaCacheClean((void *)NetBufGetAddress(netBuf, E_DATA_BUF), sendPktLen);
363 txqCur->txAddr = (uintptr_t)NetBufGetAddress(netBuf, E_DATA_BUF);
364 } else {
365 HDF_LOGE("sg len is not 1");
366 NetBufFree(netBuf);
367 return HDF_FAILURE;
368 }
369
370 txqCur->tx.info.dataLen = sendPktLen + FCS_BYTES;
371
372 HwXmitqPkg(ld, VMM_TO_DMA_ADDR(txqCur->txAddr), txqCur->tx.val);
373 ld->txqHead++;
374 if (ld->txqHead == ld->qSize) {
375 ld->txqHead = 0;
376 }
377 return HDF_SUCCESS;
378 }
379
HiethFeedHw(struct HiethNetdevLocal * ld,HiethPriv * priv)380 int32_t HiethFeedHw(struct HiethNetdevLocal *ld, HiethPriv *priv)
381 {
382 int32_t cnt = 0;
383 NetBuf *netBuf = NULL;
384 uint32_t rxFeedNext;
385
386 OsalSpinLockIrq(&(ld->rx_lock));
387
388 while (HiethReadlBits(ld, UD_REG_NAME(GLB_RO_QUEUE_STAT), BITS_RECVQ_RDY)) {
389 rxFeedNext = priv->rxFeed + 1;
390 if (rxFeedNext == HIETH_HWQ_RXQ_DEPTH) {
391 rxFeedNext = 0;
392 }
393 if (rxFeedNext == priv->rxRelease) {
394 break;
395 }
396
397 netBuf = NetBufAlloc(ALIGN(HIETH_MAX_FRAME_SIZE + ETH_PAD_SIZE + CACHE_ALIGNED_SIZE, CACHE_ALIGNED_SIZE));
398 if (netBuf == NULL) {
399 HDF_LOGE("%sL netBuf alloc fail", __func__);
400 break;
401 }
402
403 /* drop some bytes for making alignment of net dma cache */
404 netBuf->bufs[E_DATA_BUF].offset += (ALIGN((uintptr_t)NetBufGetAddress(netBuf, E_DATA_BUF), CACHE_ALIGNED_SIZE) -
405 (uintptr_t)NetBufGetAddress(netBuf, E_DATA_BUF));
406
407 #if ETH_PAD_SIZE
408 /* drop the padding word */
409 netBuf->bufs[E_DATA_BUF].offset += ETH_PAD_SIZE;
410 #endif
411
412 priv->ram->rxNetbuf[priv->rxFeed] = netBuf;
413 NetDmaCacheInv(NetBufGetAddress(netBuf, E_DATA_BUF), HIETH_MAX_FRAME_SIZE);
414
415 HiethWrite(ld, VMM_TO_DMA_ADDR((UINTPTR)NetBufGetAddress(netBuf, E_DATA_BUF)), UD_REG_NAME(GLB_IQ_ADDR));
416 priv->rxFeed = rxFeedNext;
417 cnt++;
418 }
419
420 OsalSpinUnlockIrq(&(ld->rx_lock));
421 return cnt;
422 }
423