1 /*
2 * Copyright (c) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "ctrl.h"
17 #include "eth_drv.h"
18 #include "hieth_pri.h"
19 #include "hdf_netbuf.h"
20 #include "mdio.h"
21 #include <linux/delay.h>
22 #include <netinet/ip.h>
23 #include <netinet/in.h>
24 #include <netinet/tcp.h>
25 #include <netinet/if_ether.h>
26
/* Read an expanded (indirect) FE-PHY register: latch the register number
 * into MII_EXPMA, then fetch the value through MII_EXPMD. */
static inline int32_t FephyExpandedRead(struct HiethNetdevLocal *ld, int32_t phyAddr, int32_t regNum)
{
    int32_t value;

    HiethMdioWrite(ld, phyAddr, MII_EXPMA, regNum);
    value = HiethMdioRead(ld, phyAddr, MII_EXPMD);
    return value;
}
32
/* Write an expanded (indirect) FE-PHY register: latch the register number
 * into MII_EXPMA, then store the value through MII_EXPMD.
 * Returns the result of the final MDIO write. */
static inline int32_t FephyExpandedWrite(struct HiethNetdevLocal *ld, int32_t phyAddr, int32_t regNum, int32_t val)
{
    HiethMdioWrite(ld, phyAddr, MII_EXPMA, regNum);

    return HiethMdioWrite(ld, phyAddr, MII_EXPMD, val);
}
38
HiethFephyUseDefaultTrim(struct HiethNetdevLocal * ld,const EthPhyAccess * phyAccess)39 static void HiethFephyUseDefaultTrim(struct HiethNetdevLocal *ld, const EthPhyAccess *phyAccess)
40 {
41 uint16_t val;
42 int32_t timeout = 3;
43
44 do {
45 msleep(250);
46 val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_DEF_ATE);
47 val &= BIT_AUTOTRIM_DONE; /* (0x1 << 0) */
48 } while (!val && --timeout);
49
50 if (!timeout) {
51 HDF_LOGE("festa PHY wait autotrim done timeout!");
52 }
53 mdelay(5);
54 }
55
HiethFephyTrim(struct HiethNetdevLocal * ld,const EthPhyAccess * phyAccess)56 void HiethFephyTrim(struct HiethNetdevLocal *ld, const EthPhyAccess *phyAccess)
57 {
58 uint32_t val;
59 int32_t timeout = 50;
60 uint8_t ldSet, ldoSet, tuning;
61
62 val = readl(SYS_CTRL_REG_BASE + 0x8024);
63 ldSet = (val >> BIT_OFFSET_LD_SET) & BIT_MASK_LD_SET;
64 ldoSet = (val >> BIT_OFFSET_LDO_SET) & BIT_MASK_LDO_SET;
65 tuning = (val >> BIT_OFFSET_R_TUNING) & BIT_MASK_R_TUNING;
66 if ((!ldSet) && (!ldoSet) && (!tuning)) {
67 HiethFephyUseDefaultTrim(ld, phyAccess);
68 return;
69 }
70 val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_LD_AM);
71 val = (val & ~BIT_MASK_LD_SET) | (ldSet & BIT_MASK_LD_SET);
72 FephyExpandedWrite(ld, phyAccess->phyAddr, REG_LD_AM, val);
73
74 val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_LDO_AM);
75 val = (val & ~BIT_MASK_LDO_SET) | (ldoSet & BIT_MASK_LDO_SET);
76 FephyExpandedWrite(ld, phyAccess->phyAddr, REG_LDO_AM, val);
77
78 val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_R_TUNING);
79 val = (val & ~BIT_MASK_R_TUNING) | (tuning & BIT_MASK_R_TUNING);
80 FephyExpandedWrite(ld, phyAccess->phyAddr, REG_R_TUNING, val);
81
82 val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_WR_DONE);
83 if (val & BIT_CFG_ACK) {
84 HDF_LOGE("festa PHY 0x3053 bit CFG_ACK value: 1");
85 }
86 val = val | BIT_CFG_DONE;
87
88 FephyExpandedWrite(ld, phyAccess->phyAddr, REG_WR_DONE, val);
89
90 do {
91 msleep(5);
92 val = FephyExpandedRead(ld, phyAccess->phyAddr, REG_WR_DONE);
93 val &= BIT_CFG_ACK;
94 } while (!val && --timeout);
95
96 if (!timeout) {
97 HDF_LOGE("festa PHY 0x3053 wait bit CFG_ACK timeout!\n");
98 }
99
100 mdelay(5);
101 }
102
/* Configure the RX checksum-offload (COE) drop policy.
 * When 'drop' is true, frames flagged with a bad IP header checksum — and,
 * per the IPV6_UDP_ZERO bit name, IPv6 UDP zero-checksum frames — are dropped
 * by hardware; payload-checksum drop is always forced off here.
 * NOTE(review): exact drop semantics come from the bit names — confirm
 * against the GLB_RX_COE_CTRL register documentation. */
static inline void HiethEnableRxcsumDrop(struct HiethNetdevLocal *ld, bool drop)
{
    HiethWritelBits(ld, drop, UD_REG_NAME(GLB_RX_COE_CTRL), BITS_COE_IPHDR_DROP);
    HiethWritelBits(ld, false, UD_REG_NAME(GLB_RX_COE_CTRL), BITS_COE_PAYLOAD_DROP);
    HiethWritelBits(ld, drop, UD_REG_NAME(GLB_RX_COE_CTRL), BITS_COE_IPV6_UDP_ZERO_DROP);
}
109
HiethHwMacCoreInit(struct HiethNetdevLocal * ld)110 void HiethHwMacCoreInit(struct HiethNetdevLocal *ld)
111 {
112 OsalSpinInit(&(ld->tx_lock));
113 OsalSpinInit(&(ld->rx_lock));
114
115 #ifdef HIETH_RXCSUM_SUPPORTED
116 HiethEnableRxcsumDrop(ld, true);
117 #endif
118
119 #ifdef HIETH_TSO_SUPPORTED
120 ld->sgHead = ld->sgTail = 0;
121 ld->txqHead = ld->txqTail = 0;
122 #endif
123 ld->txHwCnt = 0;
124
125 /* setup hardware */
126 (void)HiethSetHwqDepth(ld);
127 }
128
HiethHwExternalPhyReset(void)129 void HiethHwExternalPhyReset(void)
130 {
131 uint32_t val;
132
133 READ_UINT32(val, HIETH_CRG_IOBASE);
134 val |= ETH_PHY_RESET;
135 WRITE_UINT32(val, HIETH_CRG_IOBASE);
136
137 LOS_Msleep(20);
138
139 READ_UINT32(val, HIETH_CRG_IOBASE);
140 val &= ~ETH_PHY_RESET;
141 WRITE_UINT32(val, HIETH_CRG_IOBASE);
142
143 LOS_Msleep(30);
144 }
145
/* OR 'irqs' into the interrupt enable mask (caller must hold the global
 * register lock). Returns the mask read back after the write — i.e. the
 * updated value as latched by hardware, not the previous one. */
static inline int32_t IrqEnable(struct HiethNetdevLocal *ld, int32_t irqs)
{
    unsigned long mask;

    mask = HiethRead(ld, GLB_RW_IRQ_ENA);
    mask |= (unsigned long)irqs;
    HiethWrite(ld, mask, GLB_RW_IRQ_ENA);

    /* Read back so the caller sees what the register actually holds. */
    return HiethRead(ld, GLB_RW_IRQ_ENA);
}
155
/* Clear 'irqs' from the interrupt enable mask (caller must hold the global
 * register lock). Returns the enable mask as it was BEFORE the write —
 * note this differs from IrqEnable, which returns the updated mask. */
static inline int32_t IrqDisable(struct HiethNetdevLocal *ld, int32_t irqs)
{
    unsigned long prev;

    prev = HiethRead(ld, GLB_RW_IRQ_ENA);
    HiethWrite(ld, prev & ~(unsigned long)irqs, GLB_RW_IRQ_ENA);

    return prev;
}
164
ReadIrqstatus(struct HiethNetdevLocal * ld)165 static inline int32_t ReadIrqstatus(struct HiethNetdevLocal *ld)
166 {
167 int32_t status;
168
169 status = HiethRead(ld, GLB_RO_IRQ_STAT);
170 return status;
171 }
172
/* Program the station MAC address into the port's host-MAC registers.
 * mac[0] is the most significant byte: mac[0..1] go into the H16 register,
 * mac[2..5] into the L32 register. For the down port, host-MAC matching is
 * enabled first. Returns HDF_SUCCESS, or HDF_FAILURE if 'mac' is NULL.
 *
 * @param ena  kept for interface compatibility; not consulted here.
 */
int32_t HiethHwSetMacAddress(struct HiethNetdevLocal *ld, int32_t ena, const uint8_t *mac)
{
    unsigned long reg;

    (void)ena; /* unused: address programming does not depend on it */

    if (mac == NULL) {
        return HDF_FAILURE;
    }

    if (ld->port == DOWN_PORT) {
        /* Enable host-MAC address matching on the down port. */
        HiethWritelBits(ld, 1, GLB_DN_HOSTMAC_ENA, BITS_DN_HOST_ENA);
    }

    /* High 16 bits: mac[0] (MSB) and mac[1]. */
    reg = mac[1] | (mac[0] << 8);
    if (ld->port == UP_PORT) {
        HiethWrite(ld, reg, GLB_HOSTMAC_H16);
    } else {
        HiethWrite(ld, reg, GLB_DN_HOSTMAC_H16);
    }

    /* Low 32 bits: mac[2..5], mac[2] in the top byte. */
    reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
    if (ld->port == UP_PORT) {
        HiethWrite(ld, reg, GLB_HOSTMAC_L32);
    } else {
        HiethWrite(ld, reg, GLB_DN_HOSTMAC_L32);
    }
    return HDF_SUCCESS;
}
196
HiethHwGetMacAddress(struct HiethNetdevLocal * ld,uint8_t * mac)197 int32_t HiethHwGetMacAddress(struct HiethNetdevLocal *ld, uint8_t *mac)
198 {
199 unsigned long reg;
200
201 if (ld->port == UP_PORT) {
202 reg = HiethRead(ld, GLB_HOSTMAC_H16);
203 } else {
204 reg = HiethRead(ld, GLB_DN_HOSTMAC_H16);
205 }
206 mac[0] = (reg >> 8) & 0xff;
207 mac[1] = reg & 0xff;
208
209 if (ld->port == UP_PORT) {
210 reg = HiethRead(ld, GLB_HOSTMAC_L32);
211 } else {
212 reg = HiethRead(ld, GLB_DN_HOSTMAC_L32);
213 }
214 mac[2] = (reg >> 24) & 0xff;
215 mac[3] = (reg >> 16) & 0xff;
216 mac[4] = (reg >> 8) & 0xff;
217 mac[5] = reg & 0xff;
218 return HDF_SUCCESS;
219 }
220
/* Return nonzero when the hardware transmit queue can accept another packet
 * (BITS_XMITQ_RDY in the read-only queue status register). */
int32_t TestXmitQueueReady(struct HiethNetdevLocal *ld)
{
    return HiethReadlBits(ld, UD_REG_NAME(GLB_RO_QUEUE_STAT), BITS_XMITQ_RDY);
}
225
/* Locked wrapper around IrqEnable(): enables 'irqs' under the global
 * register spinlock and returns the mask read back after the update. */
int32_t HiethIrqEnable(struct HiethNetdevLocal *ld, int32_t irqs)
{
    int32_t ret;

    OsalSpinLockIrq(&hiethGlbRegLock);
    ret = IrqEnable(ld, irqs);
    OsalSpinUnlockIrq(&hiethGlbRegLock);

    return ret;
}
235
/* Locked wrapper around IrqDisable(): disables 'irqs' under the global
 * register spinlock and returns the enable mask prior to the update. */
int32_t HiethIrqDisable(struct HiethNetdevLocal *ld, int32_t irqs)
{
    int32_t ret;

    OsalSpinLockIrq(&hiethGlbRegLock);
    ret = IrqDisable(ld, irqs);
    OsalSpinUnlockIrq(&hiethGlbRegLock);

    return ret;
}
245
/* Public accessor for the raw interrupt status register.
 * Read-only, so no register lock is taken. */
int32_t HiethReadIrqstatus(struct HiethNetdevLocal *ld)
{
    return ReadIrqstatus(ld);
}
250
/* Clear the given interrupt bits by writing them to the raw IRQ register,
 * then return the status still pending afterwards. Both steps run under
 * the global register spinlock. */
int32_t HiethClearIrqstatus(struct HiethNetdevLocal *ld, int32_t irqs)
{
    int32_t pending;

    OsalSpinLockIrq(&hiethGlbRegLock);
    HiethWrite(ld, irqs, GLB_RW_IRQ_RAW);
    pending = ReadIrqstatus(ld);
    OsalSpinUnlockIrq(&hiethGlbRegLock);

    return pending;
}
261
/* Set the controller's endian mode and return the previous setting. */
int32_t HiethSetEndianMode(struct HiethNetdevLocal *ld, int32_t mode)
{
    int32_t prev = HiethReadlBits(ld, GLB_ENDIAN_MOD, BITS_ENDIAN);

    HiethWritelBits(ld, mode, GLB_ENDIAN_MOD, BITS_ENDIAN);

    return prev;
}
270
HiethSetHwqDepth(struct HiethNetdevLocal * ld)271 int32_t HiethSetHwqDepth(struct HiethNetdevLocal *ld)
272 {
273 HiethAssert(ld->depth.hwXmitq > 0 && ld->depth.hwXmitq <= HIETH_MAX_QUEUE_DEPTH);
274 if ((ld->depth.hwXmitq) > HIETH_MAX_QUEUE_DEPTH) {
275 BUG();
276 return HDF_FAILURE;
277 }
278 HiethWritelBits(ld, ld->depth.hwXmitq, UD_REG_NAME(GLB_QLEN_SET), BITS_TXQ_DEP);
279 HiethWritelBits(ld, HIETH_MAX_QUEUE_DEPTH - ld->depth.hwXmitq, UD_REG_NAME(GLB_QLEN_SET), BITS_RXQ_DEP);
280 return HDF_SUCCESS;
281 }
282
/* Reclaim transmitted packets from the software TX ring.
 * While hardware reports fewer in-flight entries than our bookkeeping
 * (ld->txHwCnt), the oldest ring entry (ld->txqTail) has completed: its
 * net buffer is freed and the tail advances. If anything was reclaimed
 * while a sender was blocked (ld->txBusy), the "can send" event is raised.
 * Runs entirely under the TX spinlock. Always returns 0. */
int32_t HiethXmitReleasePkt(struct HiethNetdevLocal *ld, const HiethPriv *priv)
{
    int32_t ret = 0;
    struct TxPktInfo *txqCur = NULL;
    int32_t txReclaimCnt = 0;
    struct PbufInfo *pbuf = NULL;

    OsalSpinLockIrq(&(ld->tx_lock));

    /* HwXmitqCntInUse < txHwCnt means at least one queued packet finished. */
    while (HwXmitqCntInUse(ld) < ld->txHwCnt) {
        HiethAssert(ld->txHwCnt);

        txqCur = ld->txq + ld->txqTail;
        /* Diagnostics only: these conditions indicate ring corruption but
         * reclaim proceeds anyway. */
        if (txqCur->txAddr == 0) {
            HDF_LOGE("%s: txAddr is invalid.", __func__);
        }
        pbuf = priv->ram->pbufInfo + ld->txqTail;
        if (pbuf->sgLen != 1) {
            HDF_LOGE("%s: pbuf info sg len is not 1.", __func__);
        }
        pbuf->dmaInfo[0] = NULL;
        NetBufFree(pbuf->buf);

        txqCur->txAddr = 0;

        /* Advance the ring tail with wrap-around. */
        ld->txqTail++;
        if (ld->txqTail == ld->qSize) {
            ld->txqTail = 0;
        }

        txReclaimCnt++;
        ld->txHwCnt--;
    }

    /* Wake a sender that stalled on a full queue, now that space exists. */
    if (txReclaimCnt && ld->txBusy) {
        ld->txBusy = 0;
        struct HiethPlatformData *hiethPlatformData = GetHiethPlatformData();
        LOS_EventWrite(&(hiethPlatformData[priv->index].stEvent), EVENT_NET_CAN_SEND);
    }

    OsalSpinUnlockIrq(&(ld->tx_lock));
    return ret;
}
326
HiethXmitGso(struct HiethNetdevLocal * ld,const HiethPriv * priv,NetBuf * netBuf)327 int32_t HiethXmitGso(struct HiethNetdevLocal *ld, const HiethPriv *priv, NetBuf *netBuf)
328 {
329 struct TxPktInfo *txqCur = NULL;
330 int32_t sendPktLen, sgLen;
331 struct PbufInfo *pbInfo = NULL;
332
333 if (netBuf == NULL) {
334 HDF_LOGE("%sL netBuf is NULL", __func__);
335 return HDF_FAILURE;
336 }
337
338 sendPktLen = NetBufGetDataLen(netBuf);
339 if (sendPktLen > HIETH_MAX_FRAME_SIZE) {
340 HDF_LOGE("%s: xmit error len=%d", __func__, sendPktLen);
341 }
342
343 pbInfo = &(priv->ram->pbufInfo[ld->txqHead]);
344 sgLen = 0;
345 pbInfo->dmaInfo[sgLen] = (void *)NetBufGetAddress(netBuf, E_DATA_BUF);
346 sgLen++;
347 pbInfo->sgLen = sgLen;
348 pbInfo->buf = netBuf;
349
350 txqCur = ld->txq + ld->txqHead;
351 txqCur->tx.val = 0;
352
353 /* default config, default closed checksum offload function */
354 txqCur->tx.info.tsoFlag = HIETH_CSUM_DISABLE;
355 txqCur->tx.info.coeFlag = HIETH_CSUM_DISABLE;
356 if (sgLen == 1) {
357 txqCur->tx.info.sgFlag = 0;
358 NetDmaCacheClean((void *)NetBufGetAddress(netBuf, E_DATA_BUF), sendPktLen);
359 txqCur->txAddr = (uintptr_t)NetBufGetAddress(netBuf, E_DATA_BUF);
360 } else {
361 HDF_LOGE("sg len is not 1");
362 NetBufFree(netBuf);
363 return HDF_FAILURE;
364 }
365
366 txqCur->tx.info.dataLen = sendPktLen + FCS_BYTES;
367
368 HwXmitqPkg(ld, VMM_TO_DMA_ADDR(txqCur->txAddr), txqCur->tx.val);
369 ld->txqHead++;
370 if (ld->txqHead == ld->qSize) {
371 ld->txqHead = 0;
372 }
373 return HDF_SUCCESS;
374 }
375
HiethFeedHw(struct HiethNetdevLocal * ld,HiethPriv * priv)376 int32_t HiethFeedHw(struct HiethNetdevLocal *ld, HiethPriv *priv)
377 {
378 int32_t cnt = 0;
379 NetBuf *netBuf = NULL;
380 uint32_t rxFeedNext;
381
382 OsalSpinLockIrq(&(ld->rx_lock));
383
384 while (HiethReadlBits(ld, UD_REG_NAME(GLB_RO_QUEUE_STAT), BITS_RECVQ_RDY)) {
385 rxFeedNext = priv->rxFeed + 1;
386 if (rxFeedNext == HIETH_HWQ_RXQ_DEPTH) {
387 rxFeedNext = 0;
388 }
389 if (rxFeedNext == priv->rxRelease) {
390 break;
391 }
392
393 netBuf = NetBufAlloc(ALIGN(HIETH_MAX_FRAME_SIZE + ETH_PAD_SIZE + CACHE_ALIGNED_SIZE, CACHE_ALIGNED_SIZE));
394 if (netBuf == NULL) {
395 HDF_LOGE("%sL netBuf alloc fail", __func__);
396 break;
397 }
398
399 /* drop some bytes for making alignment of net dma cache */
400 netBuf->bufs[E_DATA_BUF].offset += (ALIGN((uintptr_t)NetBufGetAddress(netBuf, E_DATA_BUF),
401 CACHE_ALIGNED_SIZE) - (uintptr_t)NetBufGetAddress(netBuf, E_DATA_BUF));
402
403 #if ETH_PAD_SIZE
404 /* drop the padding word */
405 netBuf->bufs[E_DATA_BUF].offset += ETH_PAD_SIZE;
406 #endif
407
408 priv->ram->rxNetbuf[priv->rxFeed] = netBuf;
409 NetDmaCacheInv(NetBufGetAddress(netBuf, E_DATA_BUF), HIETH_MAX_FRAME_SIZE);
410
411 HiethWrite(ld, VMM_TO_DMA_ADDR((UINTPTR)NetBufGetAddress(netBuf, E_DATA_BUF)), UD_REG_NAME(GLB_IQ_ADDR));
412 priv->rxFeed = rxFeedNext;
413 cnt++;
414 }
415
416 OsalSpinUnlockIrq(&(ld->rx_lock));
417 return cnt;
418 }
419