1 /*
2 * Allwinner GMAC driver.
3 *
4 * Copyright(c) 2022-2027 Allwinnertech Co., Ltd.
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2.  This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10 
11 #include <linux/kernel.h>
12 #include <linux/ctype.h>
13 #include <linux/printk.h>
14 #include <linux/io.h>
15 #include <linux/clk.h>
16 #include <linux/clk-provider.h>
17 #include <linux/reset.h>
18 #include <linux/mii.h>
19 #include <linux/crc32.h>
20 #include <linux/skbuff.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/platform_device.h>
25 #include <linux/pinctrl/consumer.h>
26 #include <linux/pinctrl/pinctrl.h>
27 #include <linux/etherdevice.h>
28 #include <linux/netdevice.h>
29 #include <linux/phy.h>
30 #include <linux/init.h>
31 #include <linux/crypto.h>
32 #include <crypto/algapi.h>
33 #include <crypto/hash.h>
34 #include <linux/err.h>
35 #include <linux/scatterlist.h>
36 #include <linux/regulator/consumer.h>
37 #include <linux/of_net.h>
38 #include <linux/of_mdio.h>
39 #ifdef CONFIG_AW_EPHY_AC300
40 #include <linux/pwm.h>
41 #endif /* CONFIG_AW_EPHY_AC300 */
42 
43 #define SUNXI_GMAC_MODULE_VERSION    "2.0.0"
44 #define SUNXI_GMAC_POWER_CHAN_NUM    3
45 #define SUNXI_GMAC_POWER_CHAR_LENGTH    20
46 
47 #define SUNXI_GMAC_DMA_DESC_RX        256
48 #define SUNXI_GMAC_DMA_DESC_TX        256
49 #define SUNXI_GMAC_BUDGET        (sunxi_gmac_dma_desc_rx / 4)
50 #define SUNXI_GMAC_TX_THRESH        (sunxi_gmac_dma_desc_tx / 4)
51 
52 #define SUNXI_GMAC_HASH_TABLE_SIZE    64
53 #define SUNXI_GMAC_MAX_BUF_SZ        (SZ_2K - 1)
54 
55 /* SUNXI_GMAC_FRAME_FILTER  register value */
56 #define SUNXI_GMAC_FRAME_FILTER_PR    0x00000001    /* Promiscuous Mode */
57 #define SUNXI_GMAC_FRAME_FILTER_HUC    0x00000002    /* Hash Unicast */
58 #define SUNXI_GMAC_FRAME_FILTER_HMC    0x00000004    /* Hash Multicast */
59 #define SUNXI_GMAC_FRAME_FILTER_DAIF    0x00000008    /* DA Inverse Filtering */
60 #define SUNXI_GMAC_FRAME_FILTER_PM    0x00000010    /* Pass all multicast */
61 #define SUNXI_GMAC_FRAME_FILTER_DBF    0x00000020    /* Disable Broadcast frames */
62 #define SUNXI_GMAC_FRAME_FILTER_SAIF    0x00000100    /* Inverse Filtering */
63 #define SUNXI_GMAC_FRAME_FILTER_SAF    0x00000200    /* Source Address Filter */
64 #define SUNXI_GMAC_FRAME_FILTER_HPF    0x00000400    /* Hash or perfect Filter */
65 #define SUNXI_GMAC_FRAME_FILTER_RA    0x80000000    /* Receive all mode */
66 
67 /* Default tx descriptor */
68 #define SUNXI_GMAC_TX_SINGLE_DESC0    0x80000000
69 #define SUNXI_GMAC_TX_SINGLE_DESC1    0x63000000
70 
71 /* Default rx descriptor */
72 #define SUNXI_GMAC_RX_SINGLE_DESC0    0x80000000
73 #define SUNXI_GMAC_RX_SINGLE_DESC1    0x83000000
74 
75 /******************************************************************************
76  *    sunxi gmac reg offset
77  *****************************************************************************/
78 #define SUNXI_GMAC_BASIC_CTL0        0x00
79 #define SUNXI_GMAC_BASIC_CTL1        0x04
80 #define SUNXI_GMAC_INT_STA        0x08
81 #define SUNXI_GMAC_INT_EN        0x0C
82 #define SUNXI_GMAC_TX_CTL0        0x10
83 #define SUNXI_GMAC_TX_CTL1        0x14
84 #define SUNXI_GMAC_TX_FLOW_CTL        0x1C
85 #define SUNXI_GMAC_TX_DESC_LIST        0x20
86 #define SUNXI_GMAC_RX_CTL0        0x24
87 #define SUNXI_GMAC_RX_CTL1        0x28
88 #define SUNXI_GMAC_RX_DESC_LIST        0x34
89 #define SUNXI_GMAC_RX_FRM_FLT        0x38
90 #define SUNXI_GMAC_RX_HASH0        0x40
91 #define SUNXI_GMAC_RX_HASH1        0x44
92 #define SUNXI_GMAC_MDIO_ADDR        0x48
93 #define SUNXI_GMAC_MDIO_DATA        0x4C
94 #define SUNXI_GMAC_ADDR_HI(reg)        (0x50 + ((reg) << 3))
95 #define SUNXI_GMAC_ADDR_LO(reg)        (0x54 + ((reg) << 3))
96 #define SUNXI_GMAC_TX_DMA_STA        0xB0
97 #define SUNXI_GMAC_TX_CUR_DESC        0xB4
98 #define SUNXI_GMAC_TX_CUR_BUF        0xB8
99 #define SUNXI_GMAC_RX_DMA_STA        0xC0
100 #define SUNXI_GMAC_RX_CUR_DESC        0xC4
101 #define SUNXI_GMAC_RX_CUR_BUF        0xC8
102 #define SUNXI_GMAC_RGMII_STA        0xD0
103 
104 #define SUNXI_GMAC_RGMII_IRQ        0x00000001
105 
106 #define SUNXI_GMAC_CTL0_LM        0x02
107 #define SUNXI_GMAC_CTL0_DM        0x01
108 #define SUNXI_GMAC_CTL0_SPEED        0x04
109 
110 #define SUNXI_GMAC_BURST_LEN        0x3F000000
111 #define SUNXI_GMAC_RX_TX_PRI        0x02
112 #define SUNXI_GMAC_SOFT_RST        0x01
113 
114 #define SUNXI_GMAC_TX_FLUSH        0x01
115 #define SUNXI_GMAC_TX_MD        0x02
116 #define SUNXI_GMAC_TX_NEXT_FRM        0x04
117 #define SUNXI_GMAC_TX_TH        0x0700
118 #define SUNXI_GMAC_TX_FLOW_CTL_BIT    0x01
119 
120 #define SUNXI_GMAC_RX_FLUSH        0x01
121 #define SUNXI_GMAC_RX_MD        0x02
122 #define SUNXI_GMAC_RX_RUNT_FRM        0x04
123 #define SUNXI_GMAC_RX_ERR_FRM        0x08
124 #define SUNXI_GMAC_RX_TH        0x0030
125 #define SUNXI_GMAC_RX_FLOW_CTL        0x1000000
126 
127 #define SUNXI_GMAC_TX_INT        0x00001
128 #define SUNXI_GMAC_TX_STOP_INT        0x00002
129 #define SUNXI_GMAC_TX_UA_INT        0x00004
130 #define SUNXI_GMAC_TX_TOUT_INT        0x00008
131 #define SUNXI_GMAC_TX_UNF_INT        0x00010
132 #define SUNXI_GMAC_TX_EARLY_INT        0x00020
133 #define SUNXI_GMAC_RX_INT        0x00100
134 #define SUNXI_GMAC_RX_UA_INT        0x00200
135 #define SUNXI_GMAC_RX_STOP_INT        0x00400
136 #define SUNXI_GMAC_RX_TOUT_INT        0x00800
137 #define SUNXI_GMAC_RX_OVF_INT        0x01000
138 #define SUNXI_GMAC_RX_EARLY_INT        0x02000
139 #define SUNXI_GMAC_LINK_STA_INT        0x10000
140 
141 #define SUNXI_GMAC_CHAIN_MODE_OFFSET    24
142 #define SUNXI_GMAC_LOOPBACK_OFFSET    2
143 #define SUNXI_GMAC_LOOPBACK        0x00000002
144 #define SUNXI_GMAC_CLEAR_SPEED        0x03
145 #define SUNXI_GMAC_1000M_SPEED        ~0x0c
146 #define SUNXI_GMAC_100M_SPEED        0x0c
147 #define SUNXI_GMAC_10M_SPEED        0x08
148 #define SUNXI_GMAC_RX_FLOW_EN        0x10000
149 #define SUNXI_GMAC_TX_FLOW_EN        0x00001
150 #define SUNXI_GMAC_PAUSE_OFFSET        4
151 #define SUNXI_GMAC_INT_OFFSET        0x3fff
152 #define SUNXI_GMAC_RX_DMA_EN        0x40000000
153 #define SUNXI_GMAC_TX_DMA_EN        0x40000000
154 #define SUNXI_GMAC_BURST_VALUE        8
155 #define SUNXI_GMAC_BURST_OFFSET        24
156 #define SUNXI_GMAC_SF_DMA_MODE        1
157 #define SUNXI_GMAC_TX_FRM_LEN_OFFSET    30
158 #define SUNXI_GMAC_CRC_OFFSET        27
159 #define SUNXI_GMAC_STRIP_FCS_OFFSET    28
160 #define SUNXI_GMAC_JUMBO_EN_OFFSET    29
161 #define SUNXI_GMAC_MDC_DIV_RATIO_M    0x03
162 #define SUNXI_GMAC_MDC_DIV_OFFSET    20
163 #define SUNXI_GMAC_TX_DMA_TH64        64
164 #define SUNXI_GMAC_TX_DMA_TH128        128
165 #define SUNXI_GMAC_TX_DMA_TH192        192
166 #define SUNXI_GMAC_TX_DMA_TH256        256
167 #define SUNXI_GMAC_TX_DMA_TH64_VAL    0x00000000
168 #define SUNXI_GMAC_TX_DMA_TH128_VAL    0X00000100
169 #define SUNXI_GMAC_TX_DMA_TH192_VAL    0x00000200
170 #define SUNXI_GMAC_TX_DMA_TH256_VAL    0x00000300
171 #define SUNXI_GMAC_RX_DMA_TH32        32
172 #define SUNXI_GMAC_RX_DMA_TH64        64
173 #define SUNXI_GMAC_RX_DMA_TH96        96
174 #define SUNXI_GMAC_RX_DMA_TH128        128
175 #define SUNXI_GMAC_RX_DMA_TH32_VAL    0x10
176 #define SUNXI_GMAC_RX_DMA_TH64_VAL    0x00
177 #define SUNXI_GMAC_RX_DMA_TH96_VAL    0x20
178 #define SUNXI_GMAC_RX_DMA_TH128_VAL    0x30
179 #define SUNXI_GMAC_TX_DMA_START        31
180 #define SUNXI_GMAC_RX_DMA_START        31
181 #define SUNXI_GMAC_DMA_DESC_BUFSIZE    11
182 #define SUNXI_GMAC_LOOPBACK_OFF        0
183 #define SUNXI_GMAC_MAC_LOOPBACK_ON    1
184 #define SUNXI_GMAC_PHY_LOOPBACK_ON    2
185 #define SUNXI_GMAC_OWN_DMA        0x80000000
186 #define SUNXI_GMAC_GPHY_TEST_OFFSET    13
187 #define SUNXI_GMAC_GPHY_TEST_MASK    0x07
188 #define SUNXI_GMAC_PHY_RGMII_MASK    0x00000004
189 #define SUNXI_GMAC_ETCS_RMII_MASK    0x00002003
190 #define SUNXI_GMAC_RGMII_INTCLK_MASK    0x00000002
191 #define SUNXI_GMAC_RMII_MASK        0x00002000
192 #define SUNXI_GMAC_TX_DELAY_MASK    0x07
193 #define SUNXI_GMAC_TX_DELAY_OFFSET    10
194 #define SUNXI_GMAC_RX_DELAY_MASK    0x1F
195 #define SUNXI_GMAC_RX_DELAY_OFFSET    5
196 /* Flow Control defines */
197 #define SUNXI_GMAC_FLOW_OFF        0
198 #define SUNXI_GMAC_FLOW_RX        1
199 #define SUNXI_GMAC_FLOW_TX        2
200 #define SUNXI_GMAC_FLOW_AUTO        (SUNXI_GMAC_FLOW_TX | SUNXI_GMAC_FLOW_RX)
201 
202 /* Ring buffer calculation helpers */
203 #define circ_cnt(head, tail, size) (((head) > (tail)) ? \
204                     ((head) - (tail)) : \
205                     ((head) - (tail)) & ((size) - 1))
206 
207 #define circ_space(head, tail, size) circ_cnt((tail), ((head) + 1), (size))
208 
209 #define circ_inc(n, s) (((n) + 1) % (s))
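/*
 * Illustrative example (not part of the original source): with size = 8,
 * head = 5 and tail = 2, circ_cnt(5, 2, 8) = 3 entries are in use and
 * circ_space(5, 2, 8) = circ_cnt(2, 6, 8) = 4 entries are still free;
 * circ_inc(7, 8) wraps the index back to 0.
 */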
210 
211 #define MAC_ADDR_LEN            18
212 #define SUNXI_GMAC_MAC_ADDRESS        "00:00:00:00:00:00"
213 static char mac_str[MAC_ADDR_LEN] = SUNXI_GMAC_MAC_ADDRESS;
214 module_param_string(mac_str, mac_str, MAC_ADDR_LEN, S_IRUGO | S_IWUSR);
215 MODULE_PARM_DESC(mac_str, "MAC address string (xx:xx:xx:xx:xx:xx)");
216 
217 static int rxmode = 1;
218 module_param(rxmode, int, S_IRUGO | S_IWUSR);
219 MODULE_PARM_DESC(rxmode, "DMA threshold control value");
220 
221 static int txmode = 1;
222 module_param(txmode, int, S_IRUGO | S_IWUSR);
223 MODULE_PARM_DESC(txmode, "DMA threshold control value");
224 
225 static int pause = 0x400;
226 module_param(pause, int, S_IRUGO | S_IWUSR);
227 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
228 
229 #define TX_TIMEO    5000
230 static int watchdog = TX_TIMEO;
231 module_param(watchdog, int, S_IRUGO | S_IWUSR);
232 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
233 
234 static int sunxi_gmac_dma_desc_rx = SUNXI_GMAC_DMA_DESC_RX;
235 module_param(sunxi_gmac_dma_desc_rx, int, S_IRUGO | S_IWUSR);
236 MODULE_PARM_DESC(sunxi_gmac_dma_desc_rx, "Number of receive descriptors");
237 
238 static int sunxi_gmac_dma_desc_tx = SUNXI_GMAC_DMA_DESC_TX;
239 module_param(sunxi_gmac_dma_desc_tx, int, S_IRUGO | S_IWUSR);
240 MODULE_PARM_DESC(sunxi_gmac_dma_desc_tx, "Number of transmit descriptors");
241 
242 /* - 0: Flow Off
243  * - 1: Rx Flow
244  * - 2: Tx Flow
245  * - 3: Rx & Tx Flow
246  */
247 static int flow_ctrl;
248 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
249 MODULE_PARM_DESC(flow_ctrl, "Flow control [0: off, 1: rx, 2: tx, 3: both]");
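/*
 * Example module load (illustrative only; the module file name is assumed
 * here and is not defined in this excerpt):
 *   insmod sunxi-gmac.ko mac_str="00:11:22:33:44:55" txmode=1 rxmode=1 flow_ctrl=3
 */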
250 
251 typedef union {
252     struct {
253         /* TDES0 */
254         unsigned int deferred:1;    /* Deferred bit (only half-duplex) */
255         unsigned int under_err:1;    /* Underflow error */
256         unsigned int ex_deferral:1;    /* Excessive deferral */
257         unsigned int coll_cnt:4;    /* Collision count */
258         unsigned int vlan_tag:1;    /* VLAN Frame */
259         unsigned int ex_coll:1;        /* Excessive collision */
260         unsigned int late_coll:1;    /* Late collision */
261         unsigned int no_carr:1;        /* No carrier */
262         unsigned int loss_carr:1;    /* Loss of carrier */
263         unsigned int ipdat_err:1;    /* IP payload error */
264         unsigned int frm_flu:1;        /* Frame flushed */
265         unsigned int jab_timeout:1;    /* Jabber timeout */
266         unsigned int err_sum:1;        /* Error summary */
267         unsigned int iphead_err:1;    /* IP header error */
268         unsigned int ttss:1;        /* Transmit time stamp status */
269         unsigned int reserved0:13;
270         unsigned int own:1;        /* Own bit. CPU:0, DMA:1 */
271     } tx;
272 
273     /* bits 5 7 0 | Frame status
274      * ----------------------------------------------------------
275      *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
276      *      1 0 0 | IPv4/6 No CSUM error.
277      *      1 0 1 | IPv4/6 CSUM PAYLOAD error
278      *      1 1 0 | IPv4/6 CSUM IP HR error
279      *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
280      *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
281      *      0 1 1 | COE bypassed.. no IPv4/6 frame
282      *      0 1 0 | Reserved.
283      */
284     struct {
285         /* RDES0 */
286         unsigned int chsum_err:1;    /* Payload checksum error */
287         unsigned int crc_err:1;        /* CRC error */
288         unsigned int dribbling:1;    /* Dribble bit error */
289         unsigned int mii_err:1;        /* Received error (bit3) */
290         unsigned int recv_wt:1;        /* Received watchdog timeout */
291         unsigned int frm_type:1;    /* Frame type */
292         unsigned int late_coll:1;    /* Late Collision */
293         unsigned int ipch_err:1;    /* IP header checksum error (bit7) */
294         unsigned int last_desc:1;    /* Last descriptor */
295         unsigned int first_desc:1;    /* First descriptor */
296         unsigned int vlan_tag:1;    /* VLAN Tag */
297         unsigned int over_err:1;    /* Overflow error (bit11) */
298         unsigned int len_err:1;        /* Length error */
299         unsigned int sou_filter:1;    /* Source address filter fail */
300         unsigned int desc_err:1;    /* Descriptor error */
301         unsigned int err_sum:1;        /* Error summary (bit15) */
302         unsigned int frm_len:14;    /* Frame length */
303         unsigned int des_filter:1;    /* Destination address filter fail */
304         unsigned int own:1;        /* Own bit. CPU:0, DMA:1 */
305         #define RX_PKT_OK        0x7FFFB77C
306         #define RX_LEN            0x3FFF0000
307     } rx;
308 
309     unsigned int all;
310 } sunxi_gmac_desc0_u;
311 
312 typedef union {
313     struct {
314         /* TDES1 */
315         unsigned int buf1_size:11;    /* Transmit buffer1 size */
316         unsigned int buf2_size:11;    /* Transmit buffer2 size */
317         unsigned int ttse:1;        /* Transmit time stamp enable */
318         unsigned int dis_pad:1;        /* Disable pad (bit23) */
319         unsigned int adr_chain:1;    /* Second address chained */
320         unsigned int end_ring:1;    /* Transmit end of ring */
321         unsigned int crc_dis:1;        /* Disable CRC */
322         unsigned int cic:2;        /* Checksum insertion control (bit27:28) */
323         unsigned int first_sg:1;    /* First Segment */
324         unsigned int last_seg:1;    /* Last Segment */
325         unsigned int interrupt:1;    /* Interrupt on completion */
326     } tx;
327 
328     struct {
329         /* RDES1 */
330         unsigned int buf1_size:11;    /* Received buffer1 size */
331         unsigned int buf2_size:11;    /* Received buffer2 size */
332         unsigned int reserved1:2;
333         unsigned int adr_chain:1;    /* Second address chained */
334         unsigned int end_ring:1;    /* Received end of ring */
335         unsigned int reserved2:5;
336         unsigned int dis_ic:1;        /* Disable interrupt on completion */
337     } rx;
338 
339     unsigned int all;
340 } sunxi_gmac_desc1_u;
341 
342 typedef struct sunxi_gmac_dma_desc {
343     sunxi_gmac_desc0_u desc0;
344     sunxi_gmac_desc1_u desc1;
345     /* The address of buffers */
346     unsigned int    desc2;
347     /* Next desc's address */
348     unsigned int    desc3;
349 } __attribute__((packed)) sunxi_gmac_dma_desc_t;
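/*
 * Each descriptor is four 32-bit words: desc0 carries the status bits and
 * the OWN flag, desc1 carries the control bits and the two buffer sizes,
 * desc2 holds the buffer physical address, and desc3 holds the physical
 * address of the next descriptor when chain mode is enabled.
 */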
350 
351 enum rx_frame_status { /* IPC status */
352     good_frame = 0,
353     discard_frame = 1,
354     csum_none = 2,
355     llc_snap = 4,
356 };
357 
358 enum tx_dma_irq_status {
359     tx_hard_error = 1,
360     tx_hard_error_bump_tc = 2,
361     handle_tx_rx = 3,
362 };
363 
364 struct sunxi_gmac_extra_stats {
365     /* Transmit errors */
366     unsigned long tx_underflow;
367     unsigned long tx_carrier;
368     unsigned long tx_losscarrier;
369     unsigned long vlan_tag;
370     unsigned long tx_deferred;
371     unsigned long tx_vlan;
372     unsigned long tx_jabber;
373     unsigned long tx_frame_flushed;
374     unsigned long tx_payload_error;
375     unsigned long tx_ip_header_error;
376 
377     /* Receive errors */
378     unsigned long rx_desc;
379     unsigned long sa_filter_fail;
380     unsigned long overflow_error;
381     unsigned long ipc_csum_error;
382     unsigned long rx_collision;
383     unsigned long rx_crc;
384     unsigned long dribbling_bit;
385     unsigned long rx_length;
386     unsigned long rx_mii;
387     unsigned long rx_multicast;
388     unsigned long rx_gmac_overflow;
389     unsigned long rx_watchdog;
390     unsigned long da_rx_filter_fail;
391     unsigned long sa_rx_filter_fail;
392     unsigned long rx_missed_cntr;
393     unsigned long rx_overflow_cntr;
394     unsigned long rx_vlan;
395 
396     /* Tx/Rx IRQ errors */
397     unsigned long tx_undeflow_irq;
398     unsigned long tx_process_stopped_irq;
399     unsigned long tx_jabber_irq;
400     unsigned long rx_overflow_irq;
401     unsigned long rx_buf_unav_irq;
402     unsigned long rx_process_stopped_irq;
403     unsigned long rx_watchdog_irq;
404     unsigned long tx_early_irq;
405     unsigned long fatal_bus_error_irq;
406 
407     /* Extra info */
408     unsigned long threshold;
409     unsigned long tx_pkt_n;
410     unsigned long rx_pkt_n;
411     unsigned long poll_n;
412     unsigned long sched_timer_n;
413     unsigned long normal_irq_n;
414 };
415 
416 struct sunxi_gmac {
417     struct sunxi_gmac_dma_desc *dma_tx;    /* Tx dma descriptor */
418     struct sk_buff **tx_skb;        /* Tx socket buffer array */
419     unsigned int tx_clean;            /* Tx ring buffer data consumer */
420     unsigned int tx_dirty;            /* Tx ring buffer data provider */
421     dma_addr_t dma_tx_phy;            /* Tx dma physical address */
422 
423     unsigned long buf_sz;            /* Size of buffer specified by current descriptor */
424 
425     struct sunxi_gmac_dma_desc *dma_rx;    /* Rx dma descriptor */
426     struct sk_buff **rx_skb;            /* Rx socket buffer array */
427     unsigned int rx_clean;            /* Rx ring buffer data consumer */
428     unsigned int rx_dirty;            /* Rx ring buffer data provider */
429     dma_addr_t dma_rx_phy;            /* Rx dma physical address */
430 
431     struct net_device *ndev;
432     struct device *dev;
433     struct napi_struct napi;
434 
435     struct sunxi_gmac_extra_stats xstats;    /* Additional network statistics */
436 
437     bool link;                /* Phy link status */
438     int speed;                /* NIC network speed */
439     int duplex;                /* NIC network duplex capability */
440 
441 #define SUNXI_EXTERNAL_PHY        1
442 #define SUNXI_INTERNAL_PHY        0
443     u32 phy_type;                /* 1: External phy, 0: Internal phy */
444 
445 #define SUNXI_PHY_USE_CLK25M        0    /* External phy use phy25m clk provided by SoC */
446 #define SUNXI_PHY_USE_EXT_OSC        1    /* External phy use extern osc 25m */
447     u32 phy_clk_type;
448 
449     int phy_interface;
450     void __iomem *base;
451     void __iomem *syscfg_base;
452     struct clk *gmac_clk;
453     struct clk *phy25m_clk;
454     struct reset_control *reset;
455     struct pinctrl *pinctrl;
456 
457     struct regulator *gmac_supply[SUNXI_GMAC_POWER_CHAN_NUM];
458     u32 gmac_supply_vol[SUNXI_GMAC_POWER_CHAN_NUM];
459 
460     int phyrst;
461     u8  rst_active_low;
462 
463     /* definition spinlock */
464     spinlock_t universal_lock;        /* universal spinlock */
465     spinlock_t tx_lock;            /* Tx transmit spinlock */
466 
467     /* adjust transmit clock delay, value: 0~7 */
468     /* adjust receive clock delay, value: 0~31 */
469     u32 tx_delay;
470     u32 rx_delay;
471 
472     struct device_node *phy_node;
473 
474 #ifdef CONFIG_AW_EPHY_AC300
475     struct device_node *ac300_np;
476     struct phy_device *ac300_dev;
477     struct pwm_device *ac300_pwm;
478     u32 pwm_channel;
479 #define PWM_DUTY_NS        205
480 #define PWM_PERIOD_NS        410
481 #endif /* CONFIG_AW_EPHY_AC300 */
482 };
483 
484 /**
485  * sunxi_gmac_desc_init_chain - GMAC dma descriptor chain table initialization
486  *
487  * @desc:    Dma descriptor
488  * @addr:    Dma descriptor physical address
489  * @size:    Number of dma descriptors
490  *
491  * Called when the NIC is up. We init Tx/Rx dma descriptor table.
492  */
493 static void sunxi_gmac_desc_init_chain(struct sunxi_gmac_dma_desc *desc, unsigned long addr, unsigned int size)
494 {
495     /* In chained mode the desc3 points to the next element in the ring.
496      * The latest element has to point to the head.
497      */
498     int i;
499     struct sunxi_gmac_dma_desc *p = desc;
500     unsigned long dma_phy = addr;
501 
502     for (i = 0; i < (size - 1); i++) {
503         dma_phy += sizeof(struct sunxi_gmac_dma_desc);
504         p->desc3 = (unsigned int)dma_phy;
505         /* Chain mode */
506         p->desc1.all |= (1 << SUNXI_GMAC_CHAIN_MODE_OFFSET);
507         p++;
508     }
509     p->desc1.all |= (1 << SUNXI_GMAC_CHAIN_MODE_OFFSET);
510     p->desc3 = (unsigned int)addr;
511 }
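/* Typical usage (illustrative; the call sites are outside this excerpt):
 * sunxi_gmac_desc_init_chain(chip->dma_rx, chip->dma_rx_phy, sunxi_gmac_dma_desc_rx);
 */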
512 
513 /**
514  * sunxi_gmac_set_link_mode - GMAC speed/duplex set func
515  *
516  * @iobase:    Gmac membase
517  * @duplex:    Duplex capability:half/full
518  * @speed:    Speed:10M/100M/1000M
519  *
520  * Programs the MAC duplex and speed configuration in BASIC_CTL0 to match
521  * the current PHY link parameters.
522  */
523 static void sunxi_gmac_set_link_mode(void *iobase, int duplex, int speed)
524 {
525     unsigned int ctrl = readl(iobase + SUNXI_GMAC_BASIC_CTL0);
526 
527     if (!duplex)
528         ctrl &= ~SUNXI_GMAC_CTL0_DM;
529     else
530         ctrl |= SUNXI_GMAC_CTL0_DM;
531 
532     /* clear ctrl speed */
533     ctrl &= SUNXI_GMAC_CLEAR_SPEED;
534 
535     switch (speed) {
536     case 1000:
537         ctrl &= SUNXI_GMAC_1000M_SPEED;
538         break;
539     case 100:
540         ctrl |= SUNXI_GMAC_100M_SPEED;
541         break;
542     case 10:
543         ctrl |= SUNXI_GMAC_10M_SPEED;
544         break;
545     default:
546         break;
547     }
548 
549     writel(ctrl, iobase + SUNXI_GMAC_BASIC_CTL0);
550 }
551 
552 /**
553  * sunxi_gmac_loopback - GMAC loopback mode set func
554  *
555  * @iobase:        Gmac membase
556  * @loopback_enable:    Loopback status
557  */
558 static void sunxi_gmac_loopback(void *iobase, int loopback_enable)
559 {
560     int reg;
561 
562     reg = readl(iobase + SUNXI_GMAC_BASIC_CTL0);
563     if (loopback_enable)
564         reg |= SUNXI_GMAC_LOOPBACK;
565     else
566         reg &= ~SUNXI_GMAC_LOOPBACK;
567     writel(reg, iobase + SUNXI_GMAC_BASIC_CTL0);
568 }
569 
570 /**
571  * sunxi_gmac_flow_ctrl - GMAC flow ctrl set func
572  *
573  * @iobase:    Gmac membase
574  * @duplex:    Duplex capability
575  * @fc:        Flow control option
576  * @pause:    Flow control pause time
577  */
578 static void sunxi_gmac_flow_ctrl(void *iobase, int duplex, int fc, int pause)
579 {
580     unsigned int flow;
581 
582     if (fc & SUNXI_GMAC_FLOW_RX) {
583         flow = readl(iobase + SUNXI_GMAC_RX_CTL0);
584         flow |= SUNXI_GMAC_RX_FLOW_EN;
585         writel(flow, iobase + SUNXI_GMAC_RX_CTL0);
586     }
587 
588     if (fc & SUNXI_GMAC_FLOW_TX) {
589         flow = readl(iobase + SUNXI_GMAC_TX_FLOW_CTL);
590         flow |= SUNXI_GMAC_TX_FLOW_EN;
591         writel(flow, iobase + SUNXI_GMAC_TX_FLOW_CTL);
592     }
593 
594     if (duplex) {
595         flow = readl(iobase + SUNXI_GMAC_TX_FLOW_CTL);
596         flow |= (pause << SUNXI_GMAC_PAUSE_OFFSET);
597         writel(flow, iobase + SUNXI_GMAC_TX_FLOW_CTL);
598     }
599 }
600 
601 /**
602  * sunxi_gmac_int_status - GMAC get int status func
603  *
604  * @iobase:    Gmac membase
605  * @x:        Extra statistics
606  */
607 static int sunxi_gmac_int_status(void *iobase, struct sunxi_gmac_extra_stats *x)
608 {
609     int ret = 0;    /* default: no DMA event that needs handling */
610     /* read the status register (CSR5) */
611     unsigned int intr_status;
612 
613     intr_status = readl(iobase + SUNXI_GMAC_RGMII_STA);
614     if (intr_status & SUNXI_GMAC_RGMII_IRQ)
615         readl(iobase + SUNXI_GMAC_RGMII_STA);
616 
617     intr_status = readl(iobase + SUNXI_GMAC_INT_STA);
618 
619     /* ABNORMAL interrupts */
620     if (intr_status & SUNXI_GMAC_TX_UNF_INT) {
621         ret = tx_hard_error_bump_tc;
622         x->tx_undeflow_irq++;
623     }
624     if (intr_status & SUNXI_GMAC_TX_TOUT_INT)
625         x->tx_jabber_irq++;
626 
627     if (intr_status & SUNXI_GMAC_RX_OVF_INT)
628         x->rx_overflow_irq++;
629 
630     if (intr_status & SUNXI_GMAC_RX_UA_INT)
631         x->rx_buf_unav_irq++;
632 
633     if (intr_status & SUNXI_GMAC_RX_STOP_INT)
634         x->rx_process_stopped_irq++;
635 
636     if (intr_status & SUNXI_GMAC_RX_TOUT_INT)
637         x->rx_watchdog_irq++;
638 
639     if (intr_status & SUNXI_GMAC_TX_EARLY_INT)
640         x->tx_early_irq++;
641 
642     if (intr_status & SUNXI_GMAC_TX_STOP_INT) {
643         x->tx_process_stopped_irq++;
644         ret = tx_hard_error;
645     }
646 
647     /* TX/RX NORMAL interrupts */
648     if (intr_status & (SUNXI_GMAC_TX_INT | SUNXI_GMAC_RX_INT | SUNXI_GMAC_RX_EARLY_INT | SUNXI_GMAC_TX_UA_INT)) {
649         x->normal_irq_n++;
650         if (intr_status & (SUNXI_GMAC_TX_INT | SUNXI_GMAC_RX_INT))
651             ret = handle_tx_rx;
652     }
653     /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
654     writel(intr_status & SUNXI_GMAC_INT_OFFSET, iobase + SUNXI_GMAC_INT_STA);
655 
656     return ret;
657 }
658 
659 /**
660  * sunxi_gmac_enable_rx - enable gmac rx dma
661  *
662  * @iobase:    Gmac membase
663  * @rxbase:    Base address of Rx descriptor
664  */
665 static void sunxi_gmac_enable_rx(void *iobase, unsigned long rxbase)
666 {
667     unsigned int value;
668 
669     /* Write the base address of Rx descriptor lists into registers */
670     writel(rxbase, iobase + SUNXI_GMAC_RX_DESC_LIST);
671 
672     value = readl(iobase + SUNXI_GMAC_RX_CTL1);
673     value |= SUNXI_GMAC_RX_DMA_EN;
674     writel(value, iobase + SUNXI_GMAC_RX_CTL1);
675 }
676 
677 /**
678  * sunxi_gmac_disable_rx - disable gmac rx dma
679  *
680  * @iobase:    Gmac membase
682  */
683 static void sunxi_gmac_disable_rx(void *iobase)
684 {
685     unsigned int value;
686 
687     value = readl(iobase + SUNXI_GMAC_RX_CTL1);
688     value &= ~SUNXI_GMAC_RX_DMA_EN;
689     writel(value, iobase + SUNXI_GMAC_RX_CTL1);
690 }
691 
692 static int sunxi_gmac_read_rx_flowctl(void *iobase)
693 {
694     unsigned int value;
695 
696     value = readl(iobase + SUNXI_GMAC_RX_CTL1);
697 
698     return value & SUNXI_GMAC_RX_FLOW_CTL;
699 }
700 
701 static int sunxi_gmac_read_tx_flowctl(void *iobase)
702 {
703     unsigned int value;
704 
705     value = readl(iobase + SUNXI_GMAC_TX_FLOW_CTL);
706 
707     return value & SUNXI_GMAC_TX_FLOW_CTL_BIT;
708 }
709 
710 static void sunxi_gmac_write_rx_flowctl(void *iobase, bool flag)
711 {
712     unsigned int value;
713 
714     value = readl(iobase + SUNXI_GMAC_RX_CTL1);
715 
716     if (flag)
717         value |= SUNXI_GMAC_RX_FLOW_CTL;
718     else
719         value &= ~SUNXI_GMAC_RX_FLOW_CTL;
720 
721     writel(value, iobase + SUNXI_GMAC_RX_CTL1);
722 }
723 
724 static void sunxi_gmac_write_tx_flowctl(void *iobase, bool flag)
725 {
726     unsigned int value;
727 
728     value = readl(iobase + SUNXI_GMAC_TX_FLOW_CTL);
729 
730     if (flag)
731         value |= SUNXI_GMAC_TX_FLOW_CTL_BIT;
732     else
733         value &= ~SUNXI_GMAC_TX_FLOW_CTL_BIT;
734 
735     writel(value, iobase + SUNXI_GMAC_TX_FLOW_CTL);
736 }
737 
738 /**
739  * sunxi_gmac_enable_tx - enable gmac tx dma
740  *
741  * @iobase:    Gmac membase
742  * @txbase:    Base address of Tx descriptor
743  */
744 static void sunxi_gmac_enable_tx(void *iobase, unsigned long txbase)
745 {
746     unsigned int value;
747 
748     /* Write the base address of Tx descriptor lists into registers */
749     writel(txbase, iobase + SUNXI_GMAC_TX_DESC_LIST);
750 
751     value = readl(iobase + SUNXI_GMAC_TX_CTL1);
752     value |= SUNXI_GMAC_TX_DMA_EN;
753     writel(value, iobase + SUNXI_GMAC_TX_CTL1);
754 }
755 
756 /**
757  * sunxi_gmac_disable_tx - disable gmac tx dma
758  *
759  * @iobase:    Gmac membase
761  */
762 static void sunxi_gmac_disable_tx(void *iobase)
763 {
764     unsigned int value = readl(iobase + SUNXI_GMAC_TX_CTL1);
765 
766     value &= ~SUNXI_GMAC_TX_DMA_EN;
767     writel(value, iobase + SUNXI_GMAC_TX_CTL1);
768 }
769 
770 static int sunxi_gmac_dma_init(void *iobase)
771 {
772     unsigned int value;
773 
774     /* Burst should be 8 */
775     value = (SUNXI_GMAC_BURST_VALUE << SUNXI_GMAC_BURST_OFFSET);
776 
777 #ifdef CONFIG_SUNXI_GMAC_DA
778     value |= SUNXI_GMAC_RX_TX_PRI;    /* Rx has priority over tx */
779 #endif
780     writel(value, iobase + SUNXI_GMAC_BASIC_CTL1);
781 
782     /* Enable only the Rx and Tx-underflow interrupts; mask the rest */
783     writel(SUNXI_GMAC_RX_INT | SUNXI_GMAC_TX_UNF_INT, iobase + SUNXI_GMAC_INT_EN);
784 
785     return 0;
786 }
787 
788 /**
789  * sunxi_gmac_init - init gmac config
790  *
791  * @iobase:    Gmac membase
792  * @txmode:    tx flow control mode
793  * @rxmode:    rx flow control mode
794  */
795 static int sunxi_gmac_init(void *iobase, int txmode, int rxmode)
796 {
797     unsigned int value;
798 
799     sunxi_gmac_dma_init(iobase);
800 
801     /* Initialize the core component */
802     value = readl(iobase + SUNXI_GMAC_TX_CTL0);
803     value |= (1 << SUNXI_GMAC_TX_FRM_LEN_OFFSET);
804     writel(value, iobase + SUNXI_GMAC_TX_CTL0);
805 
806     value = readl(iobase + SUNXI_GMAC_RX_CTL0);
807     value |= (1 << SUNXI_GMAC_CRC_OFFSET);            /* Enable CRC & IPv4 Header Checksum */
808     value |= (1 << SUNXI_GMAC_STRIP_FCS_OFFSET);        /* Automatic Pad/CRC Stripping */
809     value |= (1 << SUNXI_GMAC_JUMBO_EN_OFFSET);        /* Jumbo Frame Enable */
810     writel(value, iobase + SUNXI_GMAC_RX_CTL0);
811 
812     writel((SUNXI_GMAC_MDC_DIV_RATIO_M << SUNXI_GMAC_MDC_DIV_OFFSET),
813             iobase + SUNXI_GMAC_MDIO_ADDR);        /* MDC_DIV_RATIO */
814 
815     /* Set the Rx&Tx mode */
816     value = readl(iobase + SUNXI_GMAC_TX_CTL1);
817     if (txmode == SUNXI_GMAC_SF_DMA_MODE) {
818         /* Transmit COE type 2 cannot be done in cut-through mode. */
819         value |= SUNXI_GMAC_TX_MD;
820         /* Operating on second frame increase the performance
821          * especially when transmit store-and-forward is used.
822          */
823         value |= SUNXI_GMAC_TX_NEXT_FRM;
824     } else {
825         value &= ~SUNXI_GMAC_TX_MD;
826         value &= ~SUNXI_GMAC_TX_TH;
827         /* Set the transmit threshold */
828         if (txmode <= SUNXI_GMAC_TX_DMA_TH64)
829             value |= SUNXI_GMAC_TX_DMA_TH64_VAL;
830         else if (txmode <= SUNXI_GMAC_TX_DMA_TH128)
831             value |= SUNXI_GMAC_TX_DMA_TH128_VAL;
832         else if (txmode <= SUNXI_GMAC_TX_DMA_TH192)
833             value |= SUNXI_GMAC_TX_DMA_TH192_VAL;
834         else
835             value |= SUNXI_GMAC_TX_DMA_TH256_VAL;
836     }
837     writel(value, iobase + SUNXI_GMAC_TX_CTL1);
838 
839     value = readl(iobase + SUNXI_GMAC_RX_CTL1);
840     if (rxmode == SUNXI_GMAC_SF_DMA_MODE) {
841         value |= SUNXI_GMAC_RX_MD;
842     } else {
843         value &= ~SUNXI_GMAC_RX_MD;
844         value &= ~SUNXI_GMAC_RX_TH;
845         if (rxmode <= SUNXI_GMAC_RX_DMA_TH32)
846             value |= SUNXI_GMAC_RX_DMA_TH32_VAL;
847         else if (rxmode <= SUNXI_GMAC_RX_DMA_TH64)
848             value |= SUNXI_GMAC_RX_DMA_TH64_VAL;
849         else if (rxmode <= SUNXI_GMAC_RX_DMA_TH96)
850             value |= SUNXI_GMAC_RX_DMA_TH96_VAL;
851         else
852             value |= SUNXI_GMAC_RX_DMA_TH128_VAL;
853     }
854 
855     /* Forward frames with error and undersized good frame. */
856     value |= (SUNXI_GMAC_RX_ERR_FRM | SUNXI_GMAC_RX_RUNT_FRM);
857 
858     writel(value, iobase + SUNXI_GMAC_RX_CTL1);
859 
860     return 0;
861 }
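/*
 * A txmode/rxmode of SUNXI_GMAC_SF_DMA_MODE (1) selects store-and-forward;
 * any other value is treated as a byte threshold and rounded up to the
 * nearest supported step (64/128/192/256 for Tx, 32/64/96/128 for Rx).
 */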
862 
863 static void sunxi_gmac_hash_filter(void *iobase, unsigned long low, unsigned long high)
864 {
865     writel(high, iobase + SUNXI_GMAC_RX_HASH0);
866     writel(low, iobase + SUNXI_GMAC_RX_HASH1);
867 }
868 
869 static void sunxi_gmac_set_filter(void *iobase, unsigned int flags)
870 {
871     int tmp_flags = 0;
872 
873     /* TODO: replace numbers with macros */
874     tmp_flags |= (((flags >> 9) & 0x00000002) |
875             ((flags << 1) & 0x00000010) |
876             ((flags >> 3) & 0x00000060) |
877             ((flags << 7) & 0x00000300) |
878             ((flags << 6) & 0x00003000) |
879             ((flags << 12) & 0x00030000));
880 
881     writel(tmp_flags, iobase + SUNXI_GMAC_RX_FRM_FLT);
882 }
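/*
 * The shifts above appear to remap the SUNXI_GMAC_FRAME_FILTER_* software
 * flag bits into the bit layout expected by the hardware RX_FRM_FLT
 * register, hence the TODO to replace the magic numbers with macros.
 */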
883 
884 /* write macaddr into MAC register */
885 static void sunxi_gmac_set_mac_addr_to_reg(void *iobase, unsigned char *addr, int index)
886 {
887     unsigned long data;
888 
889     /* Each address octet is 8 bits wide, so pack the MAC address into the registers byte by byte */
890     data = (addr[5] << 8) | addr[4];
891     writel(data, iobase + SUNXI_GMAC_ADDR_HI(index));
892     data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
893     writel(data, iobase + SUNXI_GMAC_ADDR_LO(index));
894 }
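/*
 * Illustrative layout: for the address 00:11:22:33:44:55 the code above
 * writes 0x5544 to SUNXI_GMAC_ADDR_HI(index) and 0x33221100 to
 * SUNXI_GMAC_ADDR_LO(index).
 */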
895 
896 static void sunxi_gmac_dma_start(void *iobase)
897 {
898     unsigned long value;
899 
900     value = readl(iobase + SUNXI_GMAC_TX_CTL0);
901     value |= (1 << SUNXI_GMAC_TX_DMA_START);
902     writel(value, iobase + SUNXI_GMAC_TX_CTL0);
903 
904     value = readl(iobase + SUNXI_GMAC_RX_CTL0);
905     value |= (1 << SUNXI_GMAC_RX_DMA_START);
906     writel(value, iobase + SUNXI_GMAC_RX_CTL0);
907 }
908 
909 static void sunxi_gmac_dma_stop(void *iobase)
910 {
911     unsigned long value;
912 
913     value = readl(iobase + SUNXI_GMAC_TX_CTL0);
914     value &= ~(1 << SUNXI_GMAC_TX_DMA_START);
915     writel(value, iobase + SUNXI_GMAC_TX_CTL0);
916 
917     value = readl(iobase + SUNXI_GMAC_RX_CTL0);
918     value &= ~(1 << SUNXI_GMAC_RX_DMA_START);
919     writel(value, iobase + SUNXI_GMAC_RX_CTL0);
920 }
921 
922 static void sunxi_gmac_tx_poll(void *iobase)
923 {
924     unsigned int value;
925 
926     value = readl(iobase + SUNXI_GMAC_TX_CTL1);
927     writel(value | (1 << SUNXI_GMAC_TX_DMA_START), iobase + SUNXI_GMAC_TX_CTL1);
928 }
929 
930 static void sunxi_gmac_rx_poll(void *iobase)
931 {
932     unsigned int value;
933 
934     value = readl(iobase + SUNXI_GMAC_RX_CTL1);
935     writel(value | (1 << SUNXI_GMAC_RX_DMA_START), iobase + SUNXI_GMAC_RX_CTL1);
936 }
937 
938 static void sunxi_gmac_irq_enable(void *iobase)
939 {
940     writel(SUNXI_GMAC_RX_INT | SUNXI_GMAC_TX_UNF_INT, iobase + SUNXI_GMAC_INT_EN);
941 }
942 
943 static void sunxi_gmac_irq_disable(void *iobase)
944 {
945     writel(0, iobase + SUNXI_GMAC_INT_EN);
946 }
947 
948 static void sunxi_gmac_desc_buf_set(struct sunxi_gmac_dma_desc *desc, unsigned long paddr, int size)
949 {
950     desc->desc1.all &= (~((1 << SUNXI_GMAC_DMA_DESC_BUFSIZE) - 1));
951     desc->desc1.all |= (size & ((1 << SUNXI_GMAC_DMA_DESC_BUFSIZE) - 1));
952     desc->desc2 = paddr;
953 }
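/*
 * The buffer size occupies the low SUNXI_GMAC_DMA_DESC_BUFSIZE (11) bits of
 * desc1, so one descriptor addresses at most 2047 bytes, which matches
 * SUNXI_GMAC_MAX_BUF_SZ (SZ_2K - 1).
 */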
954 
955 static void sunxi_gmac_desc_set_own(struct sunxi_gmac_dma_desc *desc)
956 {
957     desc->desc0.all |= SUNXI_GMAC_OWN_DMA;
958 }
959 
960 static void sunxi_gmac_desc_tx_close(struct sunxi_gmac_dma_desc *first,
961     struct sunxi_gmac_dma_desc *end, int csum_insert)
962 {
963     struct sunxi_gmac_dma_desc *desc = first;
964 
965     first->desc1.tx.first_sg = 1;
966     end->desc1.tx.last_seg = 1;
967     end->desc1.tx.interrupt = 1;
968 
969     if (csum_insert)
970         do {
971             desc->desc1.tx.cic = 3;
972             desc++;
973         } while (desc <= end);
974 }
975 
976 static void sunxi_gmac_desc_init(struct sunxi_gmac_dma_desc *desc)
977 {
978     desc->desc1.all = 0;
979     desc->desc2  = 0;
980     desc->desc1.all |= (1 << SUNXI_GMAC_CHAIN_MODE_OFFSET);
981 }
982 
983 static int sunxi_gmac_desc_get_tx_status(struct sunxi_gmac_dma_desc *desc, struct sunxi_gmac_extra_stats *x)
984 {
985     int ret = 0;
986 
987     if (desc->desc0.tx.under_err) {
988         x->tx_underflow++;
989         ret = -EIO;
990     }
991 
992     if (desc->desc0.tx.no_carr) {
993         x->tx_carrier++;
994         ret = -EIO;
995     }
996 
997     if (desc->desc0.tx.loss_carr) {
998         x->tx_losscarrier++;
999         ret = -EIO;
1000     }
1001 
1002     if (desc->desc0.tx.deferred) {
1003         x->tx_deferred++;
1004         ret = -EIO;
1005     }
1006 
1007     return ret;
1008 }
1009 
1010 static int sunxi_gmac_desc_buf_get_len(struct sunxi_gmac_dma_desc *desc)
1011 {
1012     return (desc->desc1.all & ((1 << SUNXI_GMAC_DMA_DESC_BUFSIZE) - 1));
1013 }
1014 
1015 static int sunxi_gmac_desc_buf_get_addr(struct sunxi_gmac_dma_desc *desc)
1016 {
1017     return desc->desc2;
1018 }
1019 
1020 static int sunxi_gmac_desc_rx_frame_len(struct sunxi_gmac_dma_desc *desc)
1021 {
1022     return desc->desc0.rx.frm_len;
1023 }
1024 
1025 static int sunxi_gmac_desc_llc_snap(struct sunxi_gmac_dma_desc *desc)
1026 {
1027     /* Splice flags as follow:
1028      * bits 5 7 0 | Frame status
1029      * ----------------------------------------------------------
1030      *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
1031      *      1 0 0 | IPv4/6 No CSUM error.
1032      *      1 0 1 | IPv4/6 CSUM PAYLOAD error
1033      *      1 1 0 | IPv4/6 CSUM IP HR error
1034      *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
1035      *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
1036      *      0 1 1 | COE bypassed.. no IPv4/6 frame
1037      *      0 1 0 | Reserved.
1038      */
1039     return ((desc->desc0.rx.frm_type << 2 |
1040             desc->desc0.rx.ipch_err << 1 |
1041             desc->desc0.rx.chsum_err) & 0x7);
1042 }
1043 
1044 static int sunxi_gmac_desc_get_rx_status(struct sunxi_gmac_dma_desc *desc, struct sunxi_gmac_extra_stats *x)
1045 {
1046     int ret = good_frame;
1047 
1048     if (desc->desc0.rx.last_desc == 0) {
1049         return discard_frame;
1050     }
1051 
1052     if (desc->desc0.rx.err_sum) {
1053         if (desc->desc0.rx.desc_err)
1054             x->rx_desc++;
1055 
1056         if (desc->desc0.rx.sou_filter)
1057             x->sa_filter_fail++;
1058 
1059         if (desc->desc0.rx.over_err)
1060             x->overflow_error++;
1061 
1062         if (desc->desc0.rx.ipch_err)
1063             x->ipc_csum_error++;
1064 
1065         if (desc->desc0.rx.late_coll)
1066             x->rx_collision++;
1067 
1068         if (desc->desc0.rx.crc_err)
1069             x->rx_crc++;
1070 
1071         ret = discard_frame;
1072     }
1073 
1074     if (desc->desc0.rx.len_err) {
1075         ret = discard_frame;
1076     }
1077     if (desc->desc0.rx.mii_err) {
1078         ret = discard_frame;
1079     }
1080 
1081     if (ret == good_frame) {
1082         if (sunxi_gmac_desc_llc_snap(desc) == 0)
1083             ret = llc_snap;
1084     }
1085 
1086     return ret;
1087 }
1088 
1089 static int sunxi_gmac_desc_get_own(struct sunxi_gmac_dma_desc *desc)
1090 {
1091     return desc->desc0.all & SUNXI_GMAC_OWN_DMA;
1092 }
1093 
1094 static int sunxi_gmac_desc_get_tx_last_seg(struct sunxi_gmac_dma_desc *desc)
1095 {
1096     return desc->desc1.tx.last_seg;
1097 }
1098 
1099 static int sunxi_gmac_reset(void *iobase, int n)
1100 {
1101     unsigned int value;
1102 
1103     /* gmac software reset */
1104     value = readl(iobase + SUNXI_GMAC_BASIC_CTL1);
1105     value |= SUNXI_GMAC_SOFT_RST;
1106     writel(value, iobase + SUNXI_GMAC_BASIC_CTL1);
1107 
1108     udelay(n);
1109 
1110     return !!(readl(iobase + SUNXI_GMAC_BASIC_CTL1) & SUNXI_GMAC_SOFT_RST);
1111 }
1112 
1113 static int sunxi_gmac_stop(struct net_device *ndev);
1114 
1115 static void sunxi_gmac_dump_dma_desc(struct sunxi_gmac_dma_desc *desc, int size)
1116 {
1117 #ifdef DEBUG
1118     int i;
1119 
1120     for (i = 0; i < size; i++) {
1121         u32 *x = (u32 *)(desc + i);
1122 
1123         pr_info("\t%d [0x%08lx]: %08x %08x %08x %08x\n",
1124             i, (unsigned long)(&desc[i]),
1125             x[0], x[1], x[2], x[3]);
1126     }
1127     pr_info("\n");
1128 #endif
1129 }
1130 
1131 static ssize_t sunxi_gmac_extra_tx_stats_show(struct device *dev,
1132         struct device_attribute *attr, char *buf)
1133 {
1134     struct net_device *ndev = dev_get_drvdata(dev);
1135     struct sunxi_gmac *chip = netdev_priv(ndev);
1136 
1137     return sprintf(buf, "tx_underflow: %lu\ntx_carrier: %lu\n"
1138             "tx_losscarrier: %lu\nvlan_tag: %lu\n"
1139             "tx_deferred: %lu\ntx_vlan: %lu\n"
1140             "tx_jabber: %lu\ntx_frame_flushed: %lu\n"
1141             "tx_payload_error: %lu\ntx_ip_header_error: %lu\n\n",
1142             chip->xstats.tx_underflow, chip->xstats.tx_carrier,
1143             chip->xstats.tx_losscarrier, chip->xstats.vlan_tag,
1144             chip->xstats.tx_deferred, chip->xstats.tx_vlan,
1145             chip->xstats.tx_jabber, chip->xstats.tx_frame_flushed,
1146             chip->xstats.tx_payload_error, chip->xstats.tx_ip_header_error);
1147 }
1148 /* eg: cat extra_tx_stats */
1149 static DEVICE_ATTR(extra_tx_stats, 0444, sunxi_gmac_extra_tx_stats_show, NULL);
1150 
1151 static ssize_t sunxi_gmac_extra_rx_stats_show(struct device *dev,
1152         struct device_attribute *attr, char *buf)
1153 {
1154     struct net_device *ndev = dev_get_drvdata(dev);
1155     struct sunxi_gmac *chip = netdev_priv(ndev);
1156 
1157     return sprintf(buf, "rx_desc: %lu\nsa_filter_fail: %lu\n"
1158             "overflow_error: %lu\nipc_csum_error: %lu\n"
1159             "rx_collision: %lu\nrx_crc: %lu\n"
1160             "dribbling_bit: %lu\nrx_length: %lu\n"
1161             "rx_mii: %lu\nrx_multicast: %lu\n"
1162             "rx_gmac_overflow: %lu\nrx_watchdog: %lu\n"
1163             "da_rx_filter_fail: %lu\nsa_rx_filter_fail: %lu\n"
1164             "rx_missed_cntr: %lu\nrx_overflow_cntr: %lu\n"
1165             "rx_vlan: %lu\n\n",
1166             chip->xstats.rx_desc, chip->xstats.sa_filter_fail,
1167             chip->xstats.overflow_error, chip->xstats.ipc_csum_error,
1168             chip->xstats.rx_collision, chip->xstats.rx_crc,
1169             chip->xstats.dribbling_bit, chip->xstats.rx_length,
1170             chip->xstats.rx_mii, chip->xstats.rx_multicast,
1171             chip->xstats.rx_gmac_overflow, chip->xstats.rx_watchdog,
1172             chip->xstats.da_rx_filter_fail, chip->xstats.sa_rx_filter_fail,
1173             chip->xstats.rx_missed_cntr, chip->xstats.rx_overflow_cntr,
1174             chip->xstats.rx_vlan);
1175 }
1176 /* eg: cat extra_rx_stats */
1177 static DEVICE_ATTR(extra_rx_stats, 0444, sunxi_gmac_extra_rx_stats_show, NULL);
1178 
1179 static ssize_t sunxi_gmac_gphy_test_show(struct device *dev,
1180         struct device_attribute *attr, char *buf)
1181 {
1182     return sprintf(buf, "Usage:\necho [0/1/2/3/4] > gphy_test\n"
1183             "0 - Normal Mode\n"
1184             "1 - Transmit Jitter Test\n"
1185             "2 - Transmit Jitter Test(MASTER mode)\n"
1186             "3 - Transmit Jitter Test(SLAVE mode)\n"
1187             "4 - Transmit Distortion Test\n\n");
1188 }
1189 
1190 static ssize_t sunxi_gmac_gphy_test_store(struct device *dev,
1191         struct device_attribute *attr, const char *buf, size_t count)
1192 {
1193     struct net_device *ndev = dev_get_drvdata(dev);
1194     u16 value, phyreg_val;
1195     int ret;
1196 
1197     phyreg_val = phy_read(ndev->phydev, MII_CTRL1000);
1198 
1199     ret = kstrtou16(buf, 0, &value);
1200     if (ret)
1201         return ret;
1202 
1203     if (value <= 4) {    /* value is unsigned, so only the upper bound needs checking */
1204         phyreg_val &= ~(SUNXI_GMAC_GPHY_TEST_MASK <<
1205                 SUNXI_GMAC_GPHY_TEST_OFFSET);
1206         phyreg_val |= value << SUNXI_GMAC_GPHY_TEST_OFFSET;
1207         phy_write(ndev->phydev, MII_CTRL1000, phyreg_val);
1208         netdev_info(ndev, "Set MII_CTRL1000(0x09) Reg: 0x%x\n", phyreg_val);
1209     } else {
1210         netdev_err(ndev, "Error: Unknown value (%d)\n", value);
1211     }
1212 
1213     return count;
1214 }
1215 /* eg: echo 0 > gphy_test */
1216 static DEVICE_ATTR(gphy_test, 0664, sunxi_gmac_gphy_test_show, sunxi_gmac_gphy_test_store);
1217 
1218 static ssize_t sunxi_gmac_mii_read_show(struct device *dev,
1219         struct device_attribute *attr, char *buf)
1220 {
1221     return sprintf(buf, "Usage:\necho PHYREG > mii_read\n");
1222 }
1223 
1224 static ssize_t sunxi_gmac_mii_read_store(struct device *dev,
1225         struct device_attribute *attr, const char *buf, size_t count)
1226 {
1227     struct net_device *ndev = dev_get_drvdata(dev);
1228     u16 phyreg, phyreg_val;
1229     int ret;
1230 
1231     if (!netif_running(ndev)) {
1232         netdev_err(ndev, "Error: Nic is down\n");
1233         return count;
1234     }
1235 
1236     ret = kstrtou16(buf, 0, &phyreg);
1237     if (ret)
1238         return ret;
1239 
1240     phyreg_val = phy_read(ndev->phydev, phyreg);
1241     netdev_info(ndev, "PHYREG[0x%02x] = 0x%04x\n", phyreg, phyreg_val);
1242     return count;
1243 }
1244 /* eg: echo 0x00 > mii_read; cat mii_read */
1245 static DEVICE_ATTR(mii_read, 0664, sunxi_gmac_mii_read_show, sunxi_gmac_mii_read_store);
1246 
1247 static ssize_t sunxi_gmac_mii_write_show(struct device *dev,
1248         struct device_attribute *attr, char *buf)
1249 {
1250     return sprintf(buf, "Usage:\necho PHYREG PHYVAL > mii_write\n");
1251 }
1252 
1253 static ssize_t sunxi_gmac_mii_write_store(struct device *dev,
1254         struct device_attribute *attr, const char *buf, size_t count)
1255 {
1256     struct net_device *ndev = dev_get_drvdata(dev);
1257     u16 phyreg_val_before, phyreg_val_after;
1258     int i, ret;
1259     /* userspace_cmd[0]: phyreg
1260      * userspace_cmd[1]: phyval
1261      */
1262     u16 userspace_cmd[2] = {0};
1263     char *ptr1 = (char *)buf;
1264     char *ptr2;
1265 
1266     if (!netif_running(ndev)) {
1267         netdev_err(ndev, "Error: Nic is down\n");
1268         return count;
1269     }
1270 
1271     for (i = 0; i < ARRAY_SIZE(userspace_cmd); i++) {
1272         ptr1 = skip_spaces(ptr1);
1273         ptr2 = strchr(ptr1, ' ');
1274         if (ptr2)
1275             *ptr2 = '\0';
1276 
1277         ret = kstrtou16(ptr1, 16, &userspace_cmd[i]);
1278         if (!ptr2 || ret)
1279             break;
1280 
1281         ptr1 = ptr2 + 1;
1282     }
1283 
1284     phyreg_val_before = phy_read(ndev->phydev, userspace_cmd[0]);
1285     phy_write(ndev->phydev, userspace_cmd[0], userspace_cmd[1]);
1286     phyreg_val_after = phy_read(ndev->phydev, userspace_cmd[0]);
1287     netdev_info(ndev, "before PHYREG[0x%02x] = 0x%04x, after PHYREG[0x%02x] = 0x%04x\n",
1288             userspace_cmd[0], phyreg_val_before, userspace_cmd[0], phyreg_val_after);
1289 
1290     return count;
1291 }
1292 /* eg: echo 0x00 0x1234 > mii_write; cat mii_write */
1293 static DEVICE_ATTR(mii_write, 0664, sunxi_gmac_mii_write_show, sunxi_gmac_mii_write_store);
1294 
1295 static ssize_t sunxi_gmac_loopback_show(struct device *dev,
1296         struct device_attribute *attr, char *buf)
1297 {
1298     struct net_device *ndev = dev_get_drvdata(dev);
1299     struct sunxi_gmac *chip = netdev_priv(ndev);
1300     int macreg_val;
1301     u16 phyreg_val;
1302 
1303     phyreg_val = phy_read(ndev->phydev, MII_BMCR);
1304     if (phyreg_val & BMCR_LOOPBACK)
1305         netdev_dbg(ndev, "Phy loopback enabled\n");
1306     else
1307         netdev_dbg(ndev, "Phy loopback disabled\n");
1308 
1309     macreg_val = readl(chip->base);
1310     if (macreg_val & SUNXI_GMAC_LOOPBACK)
1311         netdev_dbg(ndev, "Mac loopback enabled\n");
1312     else
1313         netdev_dbg(ndev, "Mac loopback disabled\n");
1314 
1315     return sprintf(buf, "Usage:\necho [0/1/2] > loopback\n"
1316             "0 - Loopback off\n"
1317             "1 - Mac loopback mode\n"
1318             "2 - Phy loopback mode\n");
1319 }
1320 
1321 static ssize_t sunxi_gmac_loopback_store(struct device *dev,
1322         struct device_attribute *attr, const char *buf, size_t count)
1323 {
1324     struct net_device *ndev = dev_get_drvdata(dev);
1325     struct sunxi_gmac *chip = netdev_priv(ndev);
1326     int phyreg_val, ret;
1327     u16 mode;
1328 
1329     if (!netif_running(ndev)) {
1330         netdev_err(ndev, "Error: eth is down\n");
1331         return count;
1332     }
1333 
1334     ret = kstrtou16(buf, 0, &mode);
1335     if (ret)
1336         return ret;
1337 
1338     switch (mode) {
1339     case SUNXI_GMAC_LOOPBACK_OFF:
1340         sunxi_gmac_loopback(chip->base, 0);
1341         phyreg_val = phy_read(ndev->phydev, MII_BMCR);
1342         phy_write(ndev->phydev, MII_BMCR, phyreg_val & ~BMCR_LOOPBACK);
1343         break;
1344     case SUNXI_GMAC_MAC_LOOPBACK_ON:
1345         phyreg_val = phy_read(ndev->phydev, MII_BMCR);
1346         phy_write(ndev->phydev, MII_BMCR, phyreg_val & ~BMCR_LOOPBACK);
1347         sunxi_gmac_loopback(chip->base, 1);
1348         break;
1349     case SUNXI_GMAC_PHY_LOOPBACK_ON:
1350         sunxi_gmac_loopback(chip->base, 0);
1351         phyreg_val = phy_read(ndev->phydev, MII_BMCR);
1352         phy_write(ndev->phydev, MII_BMCR, phyreg_val | BMCR_LOOPBACK);
1353         break;
1354     default:
1355         netdev_err(ndev, "Error: Please echo right value\n");
1356         break;
1357     }
1358 
1359     return count;
1360 }
1361 /* eg: echo 1 > loopback */
1362 static DEVICE_ATTR(loopback, 0664, sunxi_gmac_loopback_show, sunxi_gmac_loopback_store);
1363 
1364 /* In phy state machine, we use this func to change link status */
1365 static void sunxi_gmac_adjust_link(struct net_device *ndev)
1366 {
1367     struct sunxi_gmac *chip = netdev_priv(ndev);
1368     struct phy_device *phydev = ndev->phydev;
1369     unsigned long flags;
1370     int new_state = 0;
1371 
1372     if (!phydev)
1373         return;
1374 
1375     spin_lock_irqsave(&chip->universal_lock, flags);
1376     if (phydev->link) {
1377         /* Now we make sure that we can be in full duplex mode.
1378          * If not, we operate in half-duplex mode.
1379          */
1380         if (phydev->duplex != chip->duplex) {
1381             new_state = 1;
1382             chip->duplex = phydev->duplex;
1383         }
1384         /* Flow Control operation */
1385         if (phydev->pause)
1386             sunxi_gmac_flow_ctrl(chip->base, phydev->duplex,
1387                     flow_ctrl, pause);
1388 
1389         if (phydev->speed != chip->speed) {
1390             new_state = 1;
1391             chip->speed = phydev->speed;
1392         }
1393 
1394         if (chip->link == 0) {
1395             new_state = 1;
1396             chip->link = phydev->link;
1397         }
1398 
1399         if (new_state)
1400             sunxi_gmac_set_link_mode(chip->base, chip->duplex, chip->speed);
1401 
1402     } else if (chip->link != phydev->link) {
1403         new_state = 1;
1404         chip->link = 0;
1405         chip->speed = 0;
1406         chip->duplex = -1;
1407     }
1408     spin_unlock_irqrestore(&chip->universal_lock, flags);
1409 
1410     if (new_state)
1411         phy_print_status(phydev);
1412 }
1413 
1414 static int sunxi_gmac_phy_release(struct net_device *ndev)
1415 {
1416     struct sunxi_gmac *chip = netdev_priv(ndev);
1417     struct phy_device *phydev = ndev->phydev;
1418     int value;
1419 
1420     /* Stop and disconnect the PHY */
1421     if (phydev)
1422         phy_stop(phydev);
1423 
1424     chip->link = PHY_DOWN;
1425     chip->speed = 0;
1426     chip->duplex = -1;
1427 
1428     if (phydev) {
1429         value = phy_read(phydev, MII_BMCR);
1430         phy_write(phydev, MII_BMCR, (value | BMCR_PDOWN));
1431     }
1432 
1433     if (phydev) {
1434         phy_disconnect(phydev);
1435         ndev->phydev = NULL;
1436     }
1437 
1438     return 0;
1439 }
1440 
1441 /* Refill rx dma descriptor after using */
1442 static void sunxi_gmac_rx_refill(struct net_device *ndev)
1443 {
1444     struct sunxi_gmac *chip = netdev_priv(ndev);
1445     struct sunxi_gmac_dma_desc *desc;
1446     struct sk_buff *skb = NULL;
1447     dma_addr_t dma_addr;
1448 
1449     while (circ_space(chip->rx_clean, chip->rx_dirty, sunxi_gmac_dma_desc_rx) > 0) {
1450         int entry = chip->rx_clean;
1451 
1452         /* Find the dirty's desc and clean it */
1453         desc = chip->dma_rx + entry;
1454 
1455         if (chip->rx_skb[entry] == NULL) {
1456             skb = netdev_alloc_skb_ip_align(ndev, chip->buf_sz);
1457 
1458             if (unlikely(skb == NULL))
1459                 break;
1460 
1461             chip->rx_skb[entry] = skb;
1462             dma_addr = dma_map_single(chip->dev, skb->data,
1463                            chip->buf_sz, DMA_FROM_DEVICE);
1464             sunxi_gmac_desc_buf_set(desc, dma_addr, chip->buf_sz);
1465         }
1466 
1467         /* Ensure the descriptor is fully written before handing ownership to the DMA */
1468         wmb();
1469         sunxi_gmac_desc_set_own(desc);
1470         chip->rx_clean = circ_inc(chip->rx_clean, sunxi_gmac_dma_desc_rx);
1471     }
1472 }
1473 
1474 /*
1475  * sunxi_gmac_dma_desc_init - initialize the RX/TX descriptor list
1476  *
1477  * @ndev: net device structure
1478  * Description: initialize the list for dma.
1479  */
1480 static int sunxi_gmac_dma_desc_init(struct net_device *ndev)
1481 {
1482     struct sunxi_gmac *chip = netdev_priv(ndev);
1483     struct device *dev = &ndev->dev;
1484 
1485     chip->rx_skb = devm_kzalloc(dev, sizeof(chip->rx_skb[0]) * sunxi_gmac_dma_desc_rx,
1486                 GFP_KERNEL);
1487     if (!chip->rx_skb) {
1488         netdev_err(ndev, "Error: Alloc rx_skb failed\n");
1489         goto rx_skb_err;
1490     }
1491     chip->tx_skb = devm_kzalloc(dev, sizeof(chip->tx_skb[0]) * sunxi_gmac_dma_desc_tx,
1492                 GFP_KERNEL);
1493     if (!chip->tx_skb) {
1494         netdev_err(ndev, "Error: Alloc tx_skb failed\n");
1495         goto tx_skb_err;
1496     }
1497 
1498     chip->dma_tx = dma_alloc_coherent(chip->dev,
1499                     sunxi_gmac_dma_desc_tx *
1500                     sizeof(struct sunxi_gmac_dma_desc),
1501                     &chip->dma_tx_phy,
1502                     GFP_KERNEL);
1503     if (!chip->dma_tx) {
1504         netdev_err(ndev, "Error: Alloc dma_tx failed\n");
1505         goto dma_tx_err;
1506     }
1507 
1508     chip->dma_rx = dma_alloc_coherent(chip->dev,
1509                     sunxi_gmac_dma_desc_rx *
1510                     sizeof(struct sunxi_gmac_dma_desc),
1511                     &chip->dma_rx_phy,
1512                     GFP_KERNEL);
1513     if (!chip->dma_rx) {
1514         netdev_err(ndev, "Error: Alloc dma_rx failed\n");
1515         goto dma_rx_err;
1516     }
1517 
1518     /* Set the buffer size depending on the MTU & max buf size */
1519     chip->buf_sz = SUNXI_GMAC_MAX_BUF_SZ;
1520     return 0;
1521 
1522 dma_rx_err:
1523     dma_free_coherent(chip->dev, sunxi_gmac_dma_desc_tx * sizeof(struct sunxi_gmac_dma_desc),
1524               chip->dma_tx, chip->dma_tx_phy);
1525 dma_tx_err:
1526     devm_kfree(dev, chip->tx_skb);
1527 tx_skb_err:
1528     devm_kfree(dev, chip->rx_skb);
1529 rx_skb_err:
1530     return -ENOMEM;
1531 }
1532 
1533 static void sunxi_gmac_free_rx_skb(struct sunxi_gmac *chip)
1534 {
1535     int i;
1536 
1537     for (i = 0; i < sunxi_gmac_dma_desc_rx; i++) {
1538         if (chip->rx_skb[i] != NULL) {
1539             struct sunxi_gmac_dma_desc *desc = chip->dma_rx + i;
1540 
1541             dma_unmap_single(chip->dev, (u32)sunxi_gmac_desc_buf_get_addr(desc),
1542                      sunxi_gmac_desc_buf_get_len(desc),
1543                      DMA_FROM_DEVICE);
1544             dev_kfree_skb_any(chip->rx_skb[i]);
1545             chip->rx_skb[i] = NULL;
1546         }
1547     }
1548 }
1549 
1550 static void sunxi_gmac_free_tx_skb(struct sunxi_gmac *chip)
1551 {
1552     int i;
1553 
1554     for (i = 0; i < sunxi_gmac_dma_desc_tx; i++) {
1555         if (chip->tx_skb[i] != NULL) {
1556             struct sunxi_gmac_dma_desc *desc = chip->dma_tx + i;
1557 
1558             if (sunxi_gmac_desc_buf_get_addr(desc))
1559                 dma_unmap_single(chip->dev, (u32)sunxi_gmac_desc_buf_get_addr(desc),
1560                          sunxi_gmac_desc_buf_get_len(desc),
1561                          DMA_TO_DEVICE);
1562             dev_kfree_skb_any(chip->tx_skb[i]);
1563             chip->tx_skb[i] = NULL;
1564         }
1565     }
1566 }
1567 
1568 static void sunxi_gmac_dma_desc_deinit(struct sunxi_gmac *chip)
1569 {
1570     /* Free the region of consistent memory previously allocated for the DMA */
1571     dma_free_coherent(chip->dev, sunxi_gmac_dma_desc_tx * sizeof(struct sunxi_gmac_dma_desc),
1572               chip->dma_tx, chip->dma_tx_phy);
1573     dma_free_coherent(chip->dev, sunxi_gmac_dma_desc_rx * sizeof(struct sunxi_gmac_dma_desc),
1574               chip->dma_rx, chip->dma_rx_phy);
1575 
1576     devm_kfree(&chip->ndev->dev, chip->rx_skb);
1577     devm_kfree(&chip->ndev->dev, chip->tx_skb);
1578 }
1579 
1580 static int sunxi_gmac_select_gpio_state(struct pinctrl *pctrl, char *name)
1581 {
1582     int ret;
1583     struct pinctrl_state *pctrl_state;
1584 
1585     pctrl_state = pinctrl_lookup_state(pctrl, name);
1586     if (IS_ERR(pctrl_state)) {
1587         pr_err("gmac pinctrl_lookup_state(%s) failed! return %p\n",
1588                         name, pctrl_state);
1589         return -EINVAL;
1590     }
1591 
1592     ret = pinctrl_select_state(pctrl, pctrl_state);
1593     if (ret < 0)
1594         pr_err("gmac pinctrl_select_state(%s) failed! return %d\n",
1595                         name, ret);
1596 
1597     return ret;
1598 }
1599 
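/**
 * sunxi_gmac_stop - GMAC device stop
 * @ndev: The Allwinner GMAC network adapter
 *
 * Reverse of sunxi_gmac_open(): stop the queue and NAPI, drop the carrier,
 * release the PHY, halt the DMA engine and free the TX/RX socket buffers.
 */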
1600 static int sunxi_gmac_stop(struct net_device *ndev)
1601 {
1602     struct sunxi_gmac *chip = netdev_priv(ndev);
1603 
1604     netif_stop_queue(ndev);
1605     napi_disable(&chip->napi);
1606 
1607     netif_carrier_off(ndev);
1608 
1609     sunxi_gmac_phy_release(ndev);
1610 
1611     sunxi_gmac_dma_stop(chip->base);
1612 
1613     netif_tx_lock_bh(ndev);
1614     /* Release the DMA TX/RX socket buffers */
1615     sunxi_gmac_free_rx_skb(chip);
1616     sunxi_gmac_free_tx_skb(chip);
1617     netif_tx_unlock_bh(ndev);
1618 
1619     return 0;
1620 }
1621 
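/*
 * Select the internal or external PHY via bit 15 of the syscfg register and
 * bring up the configured gmac-power regulators at their DT-specified voltages.
 */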
1622 static int sunxi_gmac_power_on(struct sunxi_gmac *chip)
1623 {
1624     int i, value;
1625 
1626     value = readl(chip->syscfg_base);
1627 
1628     /* The high 16 bits of the syscfg phy reg are unused */
1629     if (chip->phy_type == SUNXI_EXTERNAL_PHY)
1630         value &= ~(1 << 15);
1631     else
1632         value |= (1 << 15);
1633 
1634     for (i = 0; i < SUNXI_GMAC_POWER_CHAN_NUM; i++) {
1635         if (IS_ERR_OR_NULL(chip->gmac_supply[i]))
1636             continue;
1637 
1638         if (regulator_set_voltage(chip->gmac_supply[i],
1639                 chip->gmac_supply_vol[i],
1640                 chip->gmac_supply_vol[i])) {
1641             pr_err("gmac-power%d set voltage error\n", i);
1642             return -EINVAL;
1643         }
1644 
1645         if (regulator_enable(chip->gmac_supply[i])) {
1646             pr_err("gmac-power%d enable error\n", i);
1647             return -EINVAL;
1648         }
1649     }
1650 
1651     writel(value, chip->syscfg_base);
1652 
1653     return 0;
1654 }
1655 
1656 static void sunxi_gmac_power_off(struct sunxi_gmac *chip)
1657 {
1658     int i;
1659 
1660     for (i = 0; i < SUNXI_GMAC_POWER_CHAN_NUM; i++) {
             /* Skip supplies that were never obtained in sunxi_gmac_resource_get() */
             if (IS_ERR_OR_NULL(chip->gmac_supply[i]))
                 continue;
1661         regulator_disable(chip->gmac_supply[i]);
1662         regulator_put(chip->gmac_supply[i]);
1663     }
1664 }
1665 
1666 /**
1667  * sunxi_gmac_open - GMAC device open
1668  * @ndev: The Allwinner GMAC network adapter
1669  *
1670  * Called when system wants to start the interface. We init TX/RX channels
1671  * and enable the hardware for packet reception/transmission and start the
1672  * network queue.
1673  *
1674  * Returns 0 for a successful open, or appropriate error code
1675  */
1676 static int sunxi_gmac_open(struct net_device *ndev)
1677 {
1678     struct sunxi_gmac *chip = netdev_priv(ndev);
1679     int ret;
1680 
1681     /*
1682      * When changing the configuration of GMAC and PHY,
1683      * it is necessary to turn off the carrier on the link.
1684      */
1685     netif_carrier_off(ndev);
1686 
1687 #ifdef CONFIG_AW_EPHY_AC300
1688     if (chip->ac300_np) {
1689         chip->ac300_dev = of_phy_find_device(chip->ac300_np);
1690         if (!chip->ac300_dev) {
1691             netdev_err(ndev, "Error: Could not find ac300 %s\n",
1692                 chip->ac300_np->full_name);
1693             return -ENODEV;
1694         }
1695         phy_init_hw(chip->ac300_dev);
1696     }
1697 #endif    /* CONFIG_AW_EPHY_AC300 */
1698 
1699     if (chip->phy_node) {
1700         ndev->phydev = of_phy_connect(ndev, chip->phy_node,
1701                     &sunxi_gmac_adjust_link, 0, chip->phy_interface);
1702         if (!ndev->phydev) {
1703             netdev_err(ndev, "Error: Could not connect to phy %s\n",
1704                 chip->phy_node->full_name);
1705             return -ENODEV;
1706         }
1707         netdev_info(ndev, "%s: Type(%d) PHY ID %08x at %d IRQ %s (%s)\n",
1708             ndev->name, ndev->phydev->interface, ndev->phydev->phy_id,
1709             ndev->phydev->mdio.addr, "poll", dev_name(&ndev->phydev->mdio.dev));
1710     }
1711 
1712     ret = sunxi_gmac_reset((void *)chip->base, 1000);
1713     if (ret) {
1714         netdev_err(ndev, "Error: Mac reset failed, please check phy and mac clk\n");
1715         goto mac_reset_err;
1716     }
1717     sunxi_gmac_init(chip->base, txmode, rxmode);
1718     sunxi_gmac_set_mac_addr_to_reg(chip->base, ndev->dev_addr, 0);
1719 
1720     memset(chip->dma_tx, 0, sunxi_gmac_dma_desc_tx * sizeof(struct sunxi_gmac_dma_desc));
1721     memset(chip->dma_rx, 0, sunxi_gmac_dma_desc_rx * sizeof(struct sunxi_gmac_dma_desc));
1722 
1723     sunxi_gmac_desc_init_chain(chip->dma_rx, (unsigned long)chip->dma_rx_phy, sunxi_gmac_dma_desc_rx);
1724     sunxi_gmac_desc_init_chain(chip->dma_tx, (unsigned long)chip->dma_tx_phy, sunxi_gmac_dma_desc_tx);
1725 
1726     chip->rx_clean = 0;
1727     chip->rx_dirty = 0;
1728     chip->tx_clean = 0;
1729     chip->tx_dirty = 0;
1730     sunxi_gmac_rx_refill(ndev);
1731 
1732     /* Extra statistics */
1733     memset(&chip->xstats, 0, sizeof(struct sunxi_gmac_extra_stats));
1734 
1735     if (ndev->phydev)
1736         phy_start(ndev->phydev);
1737 
1738     sunxi_gmac_enable_rx(chip->base, (unsigned long)((struct sunxi_gmac_dma_desc *)
1739                chip->dma_rx_phy + chip->rx_dirty));
1740     sunxi_gmac_enable_tx(chip->base, (unsigned long)((struct sunxi_gmac_dma_desc *)
1741                chip->dma_tx_phy + chip->tx_clean));
1742 
1743     napi_enable(&chip->napi);
1744     netif_start_queue(ndev);
1745 
1746     /* Start the Rx/Tx */
1747     sunxi_gmac_dma_start(chip->base);
1748 
1749     return 0;
1750 
1751 mac_reset_err:
1752     if (ndev->phydev)
             phy_disconnect(ndev->phydev);
1753     return ret;
1754 }
1755 
1756 #if IS_ENABLED(CONFIG_PM)
1757 static int sunxi_gmac_resume(struct device *dev)
1758 {
1759     struct net_device *ndev = dev_get_drvdata(dev);
1760     struct sunxi_gmac *chip = netdev_priv(ndev);
1761 
1762     if (!netif_running(ndev))
1763         return 0;
1764 
1765     sunxi_gmac_select_gpio_state(chip->pinctrl, PINCTRL_STATE_DEFAULT);
1766 
1767     netif_device_attach(ndev);
1768 
1769     sunxi_gmac_open(ndev);
1770 
1771     return 0;
1772 }
1773 
1774 static int sunxi_gmac_suspend(struct device *dev)
1775 {
1776     struct net_device *ndev = dev_get_drvdata(dev);
1777     struct sunxi_gmac *chip = netdev_priv(ndev);
1778 
1779     if (!ndev || !netif_running(ndev))
1780         return 0;
1781 
1782     netif_device_detach(ndev);
1783 
1784     sunxi_gmac_stop(ndev);
1785 
1786     sunxi_gmac_select_gpio_state(chip->pinctrl, PINCTRL_STATE_SLEEP);
1787 
1788     return 0;
1789 }
1790 
1791 static const struct dev_pm_ops sunxi_gmac_pm_ops = {
1792     .suspend = sunxi_gmac_suspend,
1793     .resume = sunxi_gmac_resume,
1794 };
1795 #else
1796 static const struct dev_pm_ops sunxi_gmac_pm_ops;
1797 #endif /* CONFIG_PM */
1798 
1799 #define sunxi_get_soc_chipid(x) {}
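/*
 * Derive a stable, locally administered MAC address from the MD5 digest of
 * the SoC chip id (every other digest byte), with the multicast bit cleared
 * and the locally-administered bit set.
 */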
1800 static void sunxi_gmac_chip_hwaddr(struct net_device *ndev)
1801 {
1802 #define MD5_SIZE    16
1803 #define CHIP_SIZE    16
1804 
1805     struct crypto_ahash *tfm;
1806     struct ahash_request *req;
1807     struct scatterlist sg;
1808     u8 *addr = ndev->dev_addr;
1809     u8 result[MD5_SIZE];
1810     u8 chipid[CHIP_SIZE];
1811     int i, ret;
1812 
1813     memset(chipid, 0, sizeof(chipid));
1814     memset(result, 0, sizeof(result));
1815 
1816     sunxi_get_soc_chipid((u8 *)chipid);
1817 
1818     tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
1819     if (IS_ERR(tfm)) {
1820         netdev_err(ndev, "Error: Alloc md5 failed\n");
1821         return;
1822     }
1823 
1824     req = ahash_request_alloc(tfm, GFP_KERNEL);
1825     if (!req)
1826         goto out;
1827 
1828     ahash_request_set_callback(req, 0, NULL, NULL);
1829 
1830     ret = crypto_ahash_init(req);
1831     if (ret) {
1832         netdev_err(ndev, "Error: Crypto_ahash_init failed\n");
1833         goto out;
1834     }
1835 
1836     sg_init_one(&sg, chipid, sizeof(chipid));
1837     ahash_request_set_crypt(req, &sg, result, sizeof(chipid));
1838     ret = crypto_ahash_update(req);
1839     if (ret) {
1840         netdev_err(ndev, "Error: Crypto_ahash_update failed\n");
1841         goto out;
1842     }
1843 
1844     ret = crypto_ahash_final(req);
1845     if (ret) {
1846         netdev_err(ndev, "Error: Crypto_ahash_final failed\n");
1847         goto out;
1848     }
1849 
1850     ahash_request_free(req);
1851 
1852     /* Use bytes [0][2][4][6][8][10] of the md5 result as the MAC address */
1853     for (i = 0; i < ETH_ALEN; i++)
1854         addr[i] = result[2 * i];
1855     addr[0] &= 0xfe; /* clear multicast bit */
1856     addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
1857 
1858 out:
1859     crypto_free_ahash(tfm);
1860 }
1861 
1862 static void sunxi_gmac_check_addr(struct net_device *ndev, unsigned char *mac)
1863 {
1864     int i;
1865     char *p = mac;
1866 
1867     if (!is_valid_ether_addr(ndev->dev_addr)) {
1868         for (i = 0; i < ETH_ALEN; i++, p++)
1869             ndev->dev_addr[i] = simple_strtoul(p, &p, 16);
1870 
1871         if (!is_valid_ether_addr(ndev->dev_addr))
1872             sunxi_gmac_chip_hwaddr(ndev);
1873 
1874         if (!is_valid_ether_addr(ndev->dev_addr)) {
1875             random_ether_addr(ndev->dev_addr);
1876             netdev_dbg(ndev, "Error: Use random mac address\n");
1877         }
1878     }
1879 }
1880 
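/*
 * De-assert the GMAC reset, enable the bus/PHY clocks and program the syscfg
 * register for the RGMII/RMII interface mode and the DT-provided TX/RX clock
 * delays.
 */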
1881 static int sunxi_gmac_clk_enable(struct sunxi_gmac *chip)
1882 {
1883     struct net_device *ndev = chip->ndev;
1884     int phy_interface, ret;
1885     u32 clk_value;
1886 
1887     ret = reset_control_deassert(chip->reset);
1888     if (ret) {
1889         netdev_err(ndev, "Error: Try to de-assert gmac rst failed\n");
1890         goto gmac_reset_err;
1891     }
1892 
1893     ret = clk_prepare_enable(chip->gmac_clk);
1894     if (ret) {
1895         netdev_err(ndev, "Error: Try to enable gmac_clk failed\n");
1896         goto gmac_clk_err;
1897     }
1898 
1899     if (chip->phy_clk_type == SUNXI_PHY_USE_CLK25M) {
1900         ret = clk_prepare_enable(chip->phy25m_clk);
1901         if (ret) {
1902             netdev_err(ndev, "Error: Try to enable phy25m_clk failed\n");
1903             goto phy25m_clk_err;
1904         }
1905     }
1906 
1907     phy_interface = chip->phy_interface;
1908 
1909     clk_value = readl(chip->syscfg_base);
1910     /* Only support RGMII/RMII */
1911     if (phy_interface == PHY_INTERFACE_MODE_RGMII)
1912         clk_value |= SUNXI_GMAC_PHY_RGMII_MASK;
1913     else
1914         clk_value &= (~SUNXI_GMAC_PHY_RGMII_MASK);
1915 
1916     clk_value &= (~SUNXI_GMAC_ETCS_RMII_MASK);
1917     if (phy_interface == PHY_INTERFACE_MODE_RGMII
1918             || phy_interface == PHY_INTERFACE_MODE_GMII)
1919         clk_value |= SUNXI_GMAC_RGMII_INTCLK_MASK;
1920     else if (phy_interface == PHY_INTERFACE_MODE_RMII)
1921         clk_value |= SUNXI_GMAC_RMII_MASK;
1922 
1923     /*
1924      * Adjust Tx/Rx clock delay
1925      * Tx clock delay: 0~7
1926      * Rx clock delay: 0~31
1927      */
1928     clk_value &= ~(SUNXI_GMAC_TX_DELAY_MASK << SUNXI_GMAC_TX_DELAY_OFFSET);
1929     clk_value |= ((chip->tx_delay & SUNXI_GMAC_TX_DELAY_MASK) << SUNXI_GMAC_TX_DELAY_OFFSET);
1930     clk_value &= ~(SUNXI_GMAC_RX_DELAY_MASK << SUNXI_GMAC_RX_DELAY_OFFSET);
1931     clk_value |= ((chip->rx_delay & SUNXI_GMAC_RX_DELAY_MASK) << SUNXI_GMAC_RX_DELAY_OFFSET);
1932 
1933     writel(clk_value, chip->syscfg_base);
1934 
1935     return 0;
1936 
1937 phy25m_clk_err:
1938     clk_disable(chip->gmac_clk);
1939 gmac_clk_err:
1940     reset_control_assert(chip->reset);
1941 gmac_reset_err:
1942     return ret;
1943 }
1944 
1945 static void sunxi_gmac_clk_disable(struct sunxi_gmac *chip)
1946 {
1947     writel(0, chip->syscfg_base);
1948 
1949     if (chip->phy25m_clk)
1950         clk_disable_unprepare(chip->phy25m_clk);
1951 
1952     if (chip->gmac_clk)
1953         clk_disable_unprepare(chip->gmac_clk);
1954 
1955     if (chip->reset)
1956         reset_control_assert(chip->reset);
1957 }
1958 
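/*
 * TX error recovery: stop the queue, disable the TX DMA, drop all pending TX
 * buffers, rebuild the descriptor chain and restart transmission.
 */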
1959 static void sunxi_gmac_tx_err(struct sunxi_gmac *chip)
1960 {
1961     netif_stop_queue(chip->ndev);
1962 
1963     sunxi_gmac_disable_tx(chip->base);
1964 
1965     sunxi_gmac_free_tx_skb(chip);
1966     memset(chip->dma_tx, 0, sunxi_gmac_dma_desc_tx * sizeof(struct sunxi_gmac_dma_desc));
1967     sunxi_gmac_desc_init_chain(chip->dma_tx, (unsigned long)chip->dma_tx_phy, sunxi_gmac_dma_desc_tx);
1968     chip->tx_dirty = 0;
1969     chip->tx_clean = 0;
1970     sunxi_gmac_enable_tx(chip->base, chip->dma_tx_phy);
1971 
1972     chip->ndev->stats.tx_errors++;
1973     netif_wake_queue(chip->ndev);
1974 }
1975 
1976 static inline void sunxi_gmac_schedule(struct sunxi_gmac *chip)
1977 {
1978     if (likely(napi_schedule_prep(&chip->napi))) {
1979         sunxi_gmac_irq_disable(chip->base);
1980         __napi_schedule(&chip->napi);
1981     }
1982 }
1983 
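/*
 * Interrupt handler: read the interrupt status and either schedule NAPI for
 * normal TX/RX work or trigger TX error recovery.
 */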
1984 static irqreturn_t sunxi_gmac_interrupt(int irq, void *dev_id)
1985 {
1986     struct net_device *ndev = (struct net_device *)dev_id;
1987     struct sunxi_gmac *chip = netdev_priv(ndev);
1988     int status;
1989 
1990     status = sunxi_gmac_int_status(chip->base, (void *)(&chip->xstats));
1991 
1992     if (likely(status == handle_tx_rx))
1993         sunxi_gmac_schedule(chip);
1994     else if (unlikely(status == tx_hard_error_bump_tc))
1995         netdev_info(ndev, "Do nothing for bump tc\n");
1996     else if (unlikely(status == tx_hard_error))
1997         sunxi_gmac_tx_err(chip);
1998     else
1999         netdev_info(ndev, "Do nothing.....\n");
2000 
2001     return IRQ_HANDLED;
2002 }
2003 
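/*
 * Reclaim transmitted descriptors: walk the ring from tx_clean, update the
 * TX statistics, unmap and free the completed skbs, and wake the queue once
 * enough descriptors are available again.
 */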
2004 static void sunxi_gmac_tx_complete(struct sunxi_gmac *chip)
2005 {
2006     unsigned int entry = 0;
2007     struct sk_buff *skb = NULL;
2008     struct sunxi_gmac_dma_desc *desc = NULL;
2009     int tx_stat;
2010 
2011     spin_lock_bh(&chip->tx_lock);
2012     while (circ_cnt(chip->tx_dirty, chip->tx_clean, sunxi_gmac_dma_desc_tx) > 0) {
2013         entry = chip->tx_clean;
2014         desc = chip->dma_tx + entry;
2015 
2016         /* Check if the descriptor is owned by the DMA. */
2017         if (sunxi_gmac_desc_get_own(desc))
2018             break;
2019 
2020         /* Verify tx error by looking at the last segment */
2021         if (sunxi_gmac_desc_get_tx_last_seg(desc)) {
2022             tx_stat = sunxi_gmac_desc_get_tx_status(desc, (void *)(&chip->xstats));
2023 
2024             /*
2025              * These stats are reported through the net framework layer;
2026              * use "ifconfig -a" on the linux cmdline to view them.
2027              */
2028             if (likely(!tx_stat))
2029                 chip->ndev->stats.tx_packets++;
2030             else
2031                 chip->ndev->stats.tx_errors++;
2032         }
2033 
2034         dma_unmap_single(chip->dev, (u32)sunxi_gmac_desc_buf_get_addr(desc),
2035                  sunxi_gmac_desc_buf_get_len(desc), DMA_TO_DEVICE);
2036 
2037         skb = chip->tx_skb[entry];
2038         chip->tx_skb[entry] = NULL;
2039         sunxi_gmac_desc_init(desc);
2040 
2041         /* Advance to the next descriptor to be cleaned */
2042         chip->tx_clean = circ_inc(entry, sunxi_gmac_dma_desc_tx);
2043 
2044         if (unlikely(skb == NULL))
2045             continue;
2046 
2047         dev_kfree_skb(skb);
2048     }
2049 
2050     if (unlikely(netif_queue_stopped(chip->ndev)) &&
2051         circ_space(chip->tx_dirty, chip->tx_clean, sunxi_gmac_dma_desc_tx) >
2052         SUNXI_GMAC_TX_THRESH) {
2053         netif_wake_queue(chip->ndev);
2054     }
2055     spin_unlock_bh(&chip->tx_lock);
2056 }
2057 
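/*
 * Queue a packet for transmission: map the linear data (split into chunks of
 * at most SUNXI_GMAC_MAX_BUF_SZ) and each page fragment into consecutive
 * descriptors, close the chain, then hand ownership of the first descriptor
 * to the DMA last so the engine never sees a half-built chain.
 */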
2058 static netdev_tx_t sunxi_gmac_xmit(struct sk_buff *skb, struct net_device *ndev)
2059 {
2060     struct sunxi_gmac *chip = netdev_priv(ndev);
2061     struct sunxi_gmac_dma_desc *desc, *first;
2062     unsigned int entry, len, tmp_len = 0;
2063     int i, csum_insert;
2064     int nfrags = skb_shinfo(skb)->nr_frags;
2065     dma_addr_t dma_addr;
2066 
2067     spin_lock_bh(&chip->tx_lock);
2068     if (unlikely(circ_space(chip->tx_dirty, chip->tx_clean,
2069         sunxi_gmac_dma_desc_tx) < (nfrags + 1))) {
2070         if (!netif_queue_stopped(ndev)) {
2071             netdev_err(ndev, "Error: Tx Ring full when queue awake\n");
2072             netif_stop_queue(ndev);
2073         }
2074         spin_unlock_bh(&chip->tx_lock);
2075 
2076         return NETDEV_TX_BUSY;
2077     }
2078 
2079     csum_insert = (skb->ip_summed == CHECKSUM_PARTIAL);
2080     entry = chip->tx_dirty;
2081     first = chip->dma_tx + entry;
2082     desc = chip->dma_tx + entry;
2083 
2084     len = skb_headlen(skb);
2085     chip->tx_skb[entry] = skb;
2086 
2087     /* dump the packet */
2088     netdev_dbg(ndev, "TX packet:\n");
2089     print_hex_dump_debug("skb->data: ", DUMP_PREFIX_NONE,
2090                16, 1, skb->data, 64, true);
2091 
2092     /* Every desc max size is 2K */
2093     while (len != 0) {
2094         desc = chip->dma_tx + entry;
2095         tmp_len = ((len > SUNXI_GMAC_MAX_BUF_SZ) ? SUNXI_GMAC_MAX_BUF_SZ : len);
2096 
             /* Map the next unmapped chunk of the linear data */
2097         dma_addr = dma_map_single(chip->dev, skb->data + (skb_headlen(skb) - len),
                          tmp_len, DMA_TO_DEVICE);
2098         if (dma_mapping_error(chip->dev, dma_addr)) {
                 /* Drop the packet: unwind the stashed skb, unlock and count the drop */
                 chip->tx_skb[chip->tx_dirty] = NULL;
2099             dev_kfree_skb(skb);
                 ndev->stats.tx_dropped++;
                 spin_unlock_bh(&chip->tx_lock);
2100             return NETDEV_TX_OK;
2101         }
2102         sunxi_gmac_desc_buf_set(desc, dma_addr, tmp_len);
2103         /* Don't set the own bit of the first descriptor here */
2104         if (first != desc) {
2105             chip->tx_skb[entry] = NULL;
2106             sunxi_gmac_desc_set_own(desc);
2107         }
2108 
2109         entry = circ_inc(entry, sunxi_gmac_dma_desc_tx);
2110         len -= tmp_len;
2111     }
2112 
2113     for (i = 0; i < nfrags; i++) {
2114         const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2115 
2116         len = skb_frag_size(frag);
2117         desc = chip->dma_tx + entry;
2118         dma_addr = skb_frag_dma_map(chip->dev, frag, 0, len, DMA_TO_DEVICE);
2119         if (dma_mapping_error(chip->dev, dma_addr)) {
                 /* Drop the packet: unwind the stashed skb, unlock and count the drop */
                 chip->tx_skb[chip->tx_dirty] = NULL;
2120             dev_kfree_skb(skb);
                 ndev->stats.tx_dropped++;
                 spin_unlock_bh(&chip->tx_lock);
2121             return NETDEV_TX_OK;
2122         }
2123 
2124         sunxi_gmac_desc_buf_set(desc, dma_addr, len);
2125         sunxi_gmac_desc_set_own(desc);
2126         chip->tx_skb[entry] = NULL;
2127         entry = circ_inc(entry, sunxi_gmac_dma_desc_tx);
2128     }
2129 
2130     ndev->stats.tx_bytes += skb->len;
2131     chip->tx_dirty = entry;
2132     sunxi_gmac_desc_tx_close(first, desc, csum_insert);
2133 
2134     sunxi_gmac_desc_set_own(first);
2135     spin_unlock_bh(&chip->tx_lock);
2136 
2137     if (circ_space(chip->tx_dirty, chip->tx_clean, sunxi_gmac_dma_desc_tx) <=
2138             (MAX_SKB_FRAGS + 1)) {
2139         netif_stop_queue(ndev);
2140         if (circ_space(chip->tx_dirty, chip->tx_clean, sunxi_gmac_dma_desc_tx) >
2141                 SUNXI_GMAC_TX_THRESH)
2142             netif_wake_queue(ndev);
2143     }
2144 
2145     netdev_dbg(ndev, "TX descriptor DMA: 0x%08llx, dirty: %d, clean: %d\n",
2146             chip->dma_tx_phy, chip->tx_dirty, chip->tx_clean);
2147     sunxi_gmac_dump_dma_desc(chip->dma_tx, sunxi_gmac_dma_desc_tx);
2148 
2149     sunxi_gmac_tx_poll(chip->base);
2150     sunxi_gmac_tx_complete(chip);
2151 
2152     return NETDEV_TX_OK;
2153 }
2154 
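/*
 * Receive up to @limit frames: for each descriptor handed back by the DMA,
 * check the status, trim the FCS from the reported length (except for
 * LLC/SNAP frames), unmap the buffer and pass the skb up through
 * napi_gro_receive(), then refill the RX ring.
 */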
2155 static int sunxi_gmac_rx(struct sunxi_gmac *chip, int limit)
2156 {
2157     unsigned int rxcount = 0;
2158     unsigned int entry;
2159     struct sunxi_gmac_dma_desc *desc;
2160     struct sk_buff *skb;
2161     int status;
2162     int frame_len;
2163 
2164     while (rxcount < limit) {
2165         entry = chip->rx_dirty;
2166         desc = chip->dma_rx + entry;
2167 
2168         if (sunxi_gmac_desc_get_own(desc))
2169             break;
2170 
2171         rxcount++;
2172         chip->rx_dirty = circ_inc(chip->rx_dirty, sunxi_gmac_dma_desc_rx);
2173 
2174         /* Get length & status from hardware */
2175         frame_len = sunxi_gmac_desc_rx_frame_len(desc);
2176         status = sunxi_gmac_desc_get_rx_status(desc, (void *)(&chip->xstats));
2177 
2178         netdev_dbg(chip->ndev, "Rx frame size %d, status: %d\n",
2179                frame_len, status);
2180 
2181         skb = chip->rx_skb[entry];
2182         if (unlikely(!skb)) {
2183             netdev_err(chip->ndev, "Skb is null\n");
2184             chip->ndev->stats.rx_dropped++;
2185             break;
2186         }
2187 
2188         netdev_dbg(chip->ndev, "RX packet:\n");
2189         /* dump the packet */
2190         print_hex_dump_debug("skb->data: ", DUMP_PREFIX_NONE,
2191                 16, 1, skb->data, 64, true);
2192 
2193         if (status == discard_frame) {
2194             netdev_dbg(chip->ndev, "Get error pkt\n");
2195             chip->ndev->stats.rx_errors++;
2196             continue;
2197         }
2198 
2199         if (unlikely(status != llc_snap))
2200             frame_len -= ETH_FCS_LEN;
2201 
2202         chip->rx_skb[entry] = NULL;
2203 
2204         skb_put(skb, frame_len);
2205         dma_unmap_single(chip->dev, (u32)sunxi_gmac_desc_buf_get_addr(desc),
2206                  sunxi_gmac_desc_buf_get_len(desc), DMA_FROM_DEVICE);
2207 
2208         skb->protocol = eth_type_trans(skb, chip->ndev);
2209 
2210         skb->ip_summed = CHECKSUM_UNNECESSARY;
2211         napi_gro_receive(&chip->napi, skb);
2212 
2213         chip->ndev->stats.rx_packets++;
2214         chip->ndev->stats.rx_bytes += frame_len;
2215     }
2216 
2217     if (rxcount > 0) {
2218         netdev_dbg(chip->ndev, "RX descriptor DMA: 0x%08llx, dirty: %d, clean: %d\n",
2219                 chip->dma_rx_phy, chip->rx_dirty, chip->rx_clean);
2220         sunxi_gmac_dump_dma_desc(chip->dma_rx, sunxi_gmac_dma_desc_rx);
2221     }
2222 
2223     sunxi_gmac_rx_refill(chip->ndev);
2224 
2225     return rxcount;
2226 }
2227 
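/*
 * NAPI poll: reclaim finished TX descriptors, then receive at most @budget
 * frames; re-enable the GMAC interrupt once the budget is not exhausted.
 */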
2228 static int sunxi_gmac_poll(struct napi_struct *napi, int budget)
2229 {
2230     struct sunxi_gmac *chip = container_of(napi, struct sunxi_gmac, napi);
2231     int work_done = 0;
2232 
2233     sunxi_gmac_tx_complete(chip);
2234     work_done = sunxi_gmac_rx(chip, budget);
2235 
2236     if (work_done < budget) {
2237         napi_complete(napi);
2238         sunxi_gmac_irq_enable(chip->base);
2239     }
2240 
2241     return work_done;
2242 }
2243 
2244 static int sunxi_gmac_change_mtu(struct net_device *ndev, int new_mtu)
2245 {
2246     int max_mtu;
2247 
2248     if (netif_running(ndev)) {
2249         netdev_err(ndev, "Error: Nic must be stopped to change its MTU\n");
2250         return -EBUSY;
2251     }
2252 
2253     max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
2254 
2255     if ((new_mtu < 46) || (new_mtu > max_mtu)) {
2256         netdev_err(ndev, "Error: Invalid MTU, max MTU is: %d\n", max_mtu);
2257         return -EINVAL;
2258     }
2259 
2260     ndev->mtu = new_mtu;
2261     netdev_update_features(ndev);
2262 
2263     return 0;
2264 }
2265 
2266 static netdev_features_t sunxi_gmac_fix_features(struct net_device *ndev,
2267                        netdev_features_t features)
2268 {
2269     return features;
2270 }
2271 
2272 static void sunxi_gmac_set_rx_mode(struct net_device *ndev)
2273 {
2274     struct sunxi_gmac *chip = netdev_priv(ndev);
2275     unsigned int value = 0;
2276 
2277     netdev_dbg(ndev, "%s: # mcasts %d, # unicast %d\n",
2278          __func__, netdev_mc_count(ndev), netdev_uc_count(ndev));
2279 
2280     spin_lock_bh(&chip->universal_lock);
2281     if (ndev->flags & IFF_PROMISC) {
2282         value = SUNXI_GMAC_FRAME_FILTER_PR;
2283     } else if ((netdev_mc_count(ndev) > SUNXI_GMAC_HASH_TABLE_SIZE) ||
2284            (ndev->flags & IFF_ALLMULTI)) {
2285         value = SUNXI_GMAC_FRAME_FILTER_PM;    /* pass all multi */
2286         sunxi_gmac_hash_filter(chip->base, ~0UL, ~0UL);
2287     } else if (!netdev_mc_empty(ndev)) {
2288         u32 mc_filter[2];
2289         struct netdev_hw_addr *ha;
2290 
2291         /* Hash filter for multicast */
2292         value = SUNXI_GMAC_FRAME_FILTER_HMC;
2293 
2294         memset(mc_filter, 0, sizeof(mc_filter));
2295         netdev_for_each_mc_addr(ha, ndev) {
2296             /* The upper 6 bits of the calculated CRC are used to
2297              * index the contents of the hash table.
2298              */
2299             int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
2300             /* The most significant bit determines the register to
2301              * use (H/L) while the other 5 bits determine the bit
2302              * within the register.
2303              */
2304             mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2305         }
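        /*
         * Worked example (illustrative values): bit_nr = 39 (0b100111) selects
         * mc_filter[39 >> 5] = mc_filter[1] and bit (39 & 31) = 7, i.e.
         * mc_filter[1] |= 1 << 7.
         */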
2306         sunxi_gmac_hash_filter(chip->base, mc_filter[0], mc_filter[1]);
2307     }
2308 
2309     /* Handle multiple unicast addresses (perfect filtering)*/
2310     if (netdev_uc_count(ndev) > 16) {
2311         /* Switch to promiscuous mode if more than 16 addresses are required */
2312         value |= SUNXI_GMAC_FRAME_FILTER_PR;
2313     } else {
2314         int reg = 1;
2315         struct netdev_hw_addr *ha;
2316 
2317         netdev_for_each_uc_addr(ha, ndev) {
2318             sunxi_gmac_set_mac_addr_to_reg(chip->base, ha->addr, reg);
2319             reg++;
2320         }
2321     }
2322 
2323 #ifdef FRAME_FILTER_DEBUG
2324     /* Enable Receive all mode (to debug filtering_fail errors) */
2325     value |= SUNXI_GMAC_FRAME_FILTER_RA;
2326 #endif
2327     sunxi_gmac_set_filter(chip->base, value);
2328     spin_unlock_bh(&chip->universal_lock);
2329 }
2330 
2331 static void sunxi_gmac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
2332 {
2333     struct sunxi_gmac *chip = netdev_priv(ndev);
2334 
2335     sunxi_gmac_tx_err(chip);
2336 }
2337 
2338 static int sunxi_gmac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2339 {
2340     if (!netif_running(ndev))
2341         return -EINVAL;
2342 
2343     if (!ndev->phydev)
2344         return -EINVAL;
2345 
2346     return phy_mii_ioctl(ndev->phydev, rq, cmd);
2347 }
2348 
2349 /* Configuration changes (passed on by ifconfig) */
2350 static int sunxi_gmac_config(struct net_device *ndev, struct ifmap *map)
2351 {
2352     if (ndev->flags & IFF_UP)    /* can't act on a running interface */
2353         return -EBUSY;
2354 
2355     /* Don't allow changing the I/O address */
2356     if (map->base_addr != ndev->base_addr) {
2357         netdev_err(ndev, "Error: Can't change I/O address\n");
2358         return -EOPNOTSUPP;
2359     }
2360 
2361     /* Don't allow changing the IRQ */
2362     if (map->irq != ndev->irq) {
2363         netdev_err(ndev, "Error: Can't change IRQ number %d\n", ndev->irq);
2364         return -EOPNOTSUPP;
2365     }
2366 
2367     return 0;
2368 }
2369 
2370 static int sunxi_gmac_set_mac_address(struct net_device *ndev, void *p)
2371 {
2372     struct sunxi_gmac *chip = netdev_priv(ndev);
2373     struct sockaddr *addr = p;
2374 
2375     if (!is_valid_ether_addr(addr->sa_data)) {
2376         netdev_err(ndev, "Error: Set error mac address\n");
2377         return -EADDRNOTAVAIL;
2378     }
2379 
2380     memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
2381     sunxi_gmac_set_mac_addr_to_reg(chip->base, ndev->dev_addr, 0);
2382 
2383     return 0;
2384 }
2385 
2386 static int sunxi_gmac_set_features(struct net_device *ndev, netdev_features_t features)
2387 {
2388     struct sunxi_gmac *chip = netdev_priv(ndev);
2389 
2390     if (features & NETIF_F_LOOPBACK && netif_running(ndev))
2391         sunxi_gmac_loopback(chip->base, 1);
2392     else
2393         sunxi_gmac_loopback(chip->base, 0);
2394 
2395     return 0;
2396 }
2397 
2398 #if IS_ENABLED(CONFIG_NET_POLL_CONTROLLER)
2399 /* Polling receive - used by NETCONSOLE and other diagnostic tools
2400  * to allow network I/O with interrupts disabled.
2401  */
2402 static void sunxi_gmac_poll_controller(struct net_device *dev)
2403 {
2404     disable_irq(dev->irq);
2405     sunxi_gmac_interrupt(dev->irq, dev);
2406     enable_irq(dev->irq);
2407 }
2408 #endif
2409 
2410 static const struct net_device_ops sunxi_gmac_netdev_ops = {
2411     .ndo_init = NULL,
2412     .ndo_open = sunxi_gmac_open,
2413     .ndo_start_xmit = sunxi_gmac_xmit,
2414     .ndo_stop = sunxi_gmac_stop,
2415     .ndo_change_mtu = sunxi_gmac_change_mtu,
2416     .ndo_fix_features = sunxi_gmac_fix_features,
2417     .ndo_set_rx_mode = sunxi_gmac_set_rx_mode,
2418     .ndo_tx_timeout = sunxi_gmac_tx_timeout,
2419     .ndo_do_ioctl = sunxi_gmac_ioctl,
2420     .ndo_set_config = sunxi_gmac_config,
2421 #if IS_ENABLED(CONFIG_NET_POLL_CONTROLLER)
2422     .ndo_poll_controller = sunxi_gmac_poll_controller,
2423 #endif
2424     .ndo_set_mac_address = sunxi_gmac_set_mac_address,
2425     .ndo_set_features = sunxi_gmac_set_features,
2426 };
2427 
2428 static int sunxi_gmac_check_if_running(struct net_device *ndev)
2429 {
2430     if (!netif_running(ndev))
2431         return -EBUSY;
2432     return 0;
2433 }
2434 
2435 static int sunxi_gmac_ethtool_get_sset_count(struct net_device *netdev, int sset)
2436 {
2437     int len;
2438 
2439     switch (sset) {
2440     case ETH_SS_STATS:
2441         len = 0;
2442         return len;
2443     default:
2444         return -EOPNOTSUPP;
2445     }
2446 }
2447 
2448 
2449 /**
2450  * sunxi_gmac_ethtool_getdrvinfo - Get various SUNXI GMAC driver information.
2451  * @ndev:    Pointer to net_device structure
2452  * @ed:        Pointer to ethtool_drvinfo structure
2453  *
2454  * This implements ethtool command for getting the driver information.
2455  * Issue "ethtool -i ethX" under linux prompt to execute this function.
2456  */
2457 static void sunxi_gmac_ethtool_getdrvinfo(struct net_device *ndev,
2458                     struct ethtool_drvinfo *info)
2459 {
2460     strlcpy(info->driver, "sunxi_gmac", sizeof(info->driver));
2461 
2462     strlcpy(info->version, SUNXI_GMAC_MODULE_VERSION, sizeof(info->version));
2463     info->fw_version[0] = '\0';
2464 }
2465 
2466 /**
2467  * sunxi_gmac_ethtool_get_pauseparam - Get the pause parameter setting for Tx/Rx.
2468  *
2469  * @ndev:    Pointer to net_device structure
2470  * @epause:    Pointer to ethtool_pauseparam structure
2471  *
2472  * This implements ethtool command for getting sunxi_gmac ethernet pause frame
2473  * setting. Issue "ethtool -a ethx" to execute this function.
2474  */
2475 static void sunxi_gmac_ethtool_get_pauseparam(struct net_device *ndev,
2476                     struct ethtool_pauseparam *epause)
2477 {
2478     struct sunxi_gmac *chip = netdev_priv(ndev);
2479 
2480     /* TODO: need to support autoneg */
2481     epause->tx_pause = sunxi_gmac_read_tx_flowctl(chip->base);
2482     epause->rx_pause = sunxi_gmac_read_rx_flowctl(chip->base);
2483 }
2484 
2485 /**
2486  * sunxi_gmac_ethtool_set_pauseparam - Set device pause parameter (flow control)
2487  *                settings.
2488  * @ndev:    Pointer to net_device structure
2489  * @epause:    Pointer to ethtool_pauseparam structure
2490  *
2491  * This implements ethtool command for enabling flow control on Rx and Tx.
2492  * Issue "ethtool -A ethx tx on|off" under linux prompt to execute this
2493  * function.
2494  *
2495  */
2496 static int sunxi_gmac_ethtool_set_pauseparam(struct net_device *ndev,
2497                     struct ethtool_pauseparam *epause)
2498 {
2499     struct sunxi_gmac *chip = netdev_priv(ndev);
2500 
2501     sunxi_gmac_write_tx_flowctl(chip->base, !!epause->tx_pause);
2502     netdev_info(ndev, "Tx flowctrl %s\n", epause->tx_pause ? "ON" : "OFF");
2503 
2504     sunxi_gmac_write_rx_flowctl(chip->base, !!epause->rx_pause);
2505     netdev_info(ndev, "Rx flowctrl %s\n", epause->rx_pause ? "ON" : "OFF");
2506 
2507     return 0;
2508 }
2509 
2510 /**
2511  * sunxi_gmac_ethtool_get_wol - Get device wake-on-lan settings.
2512  *
2513  * @ndev:    Pointer to net_device structure
2514  * @wol:    Pointer to ethtool_wolinfo structure
2515  *
2516  * This implements the ethtool command for getting wake-on-lan settings.
2517  * Issue "ethtool ethx" under the linux prompt to execute
2518  * this function.
2519  */
2520 static void sunxi_gmac_ethtool_get_wol(struct net_device *ndev,
2521                 struct ethtool_wolinfo *wol)
2522 {
2523     struct sunxi_gmac *chip = netdev_priv(ndev);
2524 
2525     spin_lock_irq(&chip->universal_lock);
2526     /* TODO: need to support wol */
2527     spin_unlock_irq(&chip->universal_lock);
2528 
2529     netdev_err(ndev, "Error: wakeup-on-lan func is not supported yet\n");
2530 }
2531 
2532 /**
2533  * sunxi_gmac_ethtool_set_wol - set device wake-on-lan settings.
2534  *
2535  * @ndev:    Pointer to net_device structure
2536  * @wol:    Pointer to ethtool_wolinfo structure
2537  *
2538  * This implements the ethtool command for setting wake-on-lan settings.
2539  * Issue "ethtool -s ethx wol p|u|m|b|a|g|s|d" under the linux prompt to execute
2540  * this function.
2541  */
2542 static int sunxi_gmac_ethtool_set_wol(struct net_device *ndev,
2543                 struct ethtool_wolinfo *wol)
2544 {
2545     /*
2546      * TODO: Wake-on-lane function need to be supported.
2547      */
2548 
2549     return 0;
2550 }
2551 
2552 static const struct ethtool_ops sunxi_gmac_ethtool_ops = {
2553     .begin = sunxi_gmac_check_if_running,
2554     .get_link = ethtool_op_get_link,
2555     .get_pauseparam = sunxi_gmac_ethtool_get_pauseparam,
2556     .set_pauseparam = sunxi_gmac_ethtool_set_pauseparam,
2557     .get_wol = sunxi_gmac_ethtool_get_wol,
2558     .set_wol = sunxi_gmac_ethtool_set_wol,
2559     .get_sset_count = sunxi_gmac_ethtool_get_sset_count,
2560     .get_drvinfo = sunxi_gmac_ethtool_getdrvinfo,
2561     .get_link_ksettings = phy_ethtool_get_link_ksettings,
2562     .set_link_ksettings = phy_ethtool_set_link_ksettings,
2563 };
2564 
2565 static int sunxi_gmac_hardware_init(struct platform_device *pdev)
2566 {
2567     struct net_device *ndev = platform_get_drvdata(pdev);
2568     struct sunxi_gmac *chip = netdev_priv(ndev);
2569     int ret;
2570 
2571     ret = sunxi_gmac_power_on(chip);
2572     if (ret) {
2573         netdev_err(ndev, "Error: Gmac power on failed\n");
2574         ret = -EINVAL;
2575         goto power_on_err;
2576     }
2577 
2578     ret = sunxi_gmac_clk_enable(chip);
2579     if (ret) {
2580         netdev_err(ndev, "Error: Clk enable is failed\n");
2581         ret = -EINVAL;
2582         goto clk_enable_err;
2583     }
2584 
2585 #ifdef CONFIG_AW_EPHY_AC300
2586     ret = pwm_config(chip->ac300_pwm, PWM_DUTY_NS, PWM_PERIOD_NS);
2587     if (ret) {
2588         netdev_err(ndev, "Error: Config ac300 pwm failed\n");
2589         ret = -EINVAL;
2590         goto pwm_config_err;
2591     }
2592 
2593     ret = pwm_enable(chip->ac300_pwm);
2594     if (ret) {
2595         netdev_err(ndev, "Error: Enable ac300 pwm failed\n");
2596         ret = -EINVAL;
2597         goto pwm_enable_err;
2598     }
2599 #endif /* CONFIG_AW_EPHY_AC300 */
2600 
2601     return 0;
2602 
2603 #ifdef CONFIG_AW_EPHY_AC300
2604 pwm_enable_err:
2605 pwm_config_err:
2606     sunxi_gmac_clk_disable(chip);
2607 #endif    /* CONFIG_AW_EPHY_AC300 */
2608 clk_enable_err:
2609     sunxi_gmac_power_off(chip);
2610 power_on_err:
2611     return ret;
2612 }
2613 
2614 static void sunxi_gmac_hardware_deinit(struct platform_device *pdev)
2615 {
2616     struct net_device *ndev = platform_get_drvdata(pdev);
2617     struct sunxi_gmac *chip = netdev_priv(ndev);
2618 
2619     sunxi_gmac_power_off(chip);
2620 
2621     sunxi_gmac_clk_disable(chip);
2622 
2623 #ifdef CONFIG_AW_EPHY_AC300
2624     pwm_disable(chip->ac300_pwm);
2625 #endif /*CONFIG_AW_EPHY_AC300 */
2626 }
2627 
2628 static int sunxi_gmac_resource_get(struct platform_device *pdev)
2629 {
2630     struct net_device *ndev = platform_get_drvdata(pdev);
2631     struct sunxi_gmac *chip = netdev_priv(ndev);
2632     struct device_node *np = pdev->dev.of_node;
2633     struct resource *res;
2634     phy_interface_t phy_mode;
2635     u32 value;
2636     char power_vol[SUNXI_GMAC_POWER_CHAR_LENGTH];
2637     char power[SUNXI_GMAC_POWER_CHAR_LENGTH];
2638     int ret, i;
2639 
2640     /* External phy is selected by default */
2641     chip->phy_type = SUNXI_EXTERNAL_PHY;
2642 
2643     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2644     if (!res) {
2645         netdev_err(ndev, "Error: Get gmac memory failed\n");
2646         return -ENODEV;
2647     }
2648 
2649     chip->base = devm_ioremap_resource(&pdev->dev, res);
2650     if (IS_ERR(chip->base)) {
2651         netdev_err(ndev, "Error: Gmac memory mapping failed\n");
2652         return PTR_ERR(chip->base);
2653     }
2654 
2655     res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2656     if (!res) {
2657         netdev_err(ndev, "Error: Get phy memory failed\n");
2658         return -ENODEV;
2659     }
2660 
2661     chip->syscfg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
2662     if (!chip->syscfg_base) {
2663         netdev_err(ndev, "Error: Phy memory mapping failed\n");
2664         return -ENOMEM;
2665     }
2666 
2667     ndev->irq = platform_get_irq_byname(pdev, "gmacirq");
2668     if (ndev->irq < 0) {
2669         netdev_err(ndev, "Error: Gmac irq not found\n");
2670         return -ENXIO;
2671     }
2672 
2673     ret = devm_request_irq(&pdev->dev, ndev->irq, sunxi_gmac_interrupt, IRQF_SHARED, dev_name(&pdev->dev), ndev);
2674     if (ret) {
2675         netdev_err(ndev, "Error: Could not request irq %d\n", ndev->irq);
2676         return -EINVAL;
2677     }
2678 
2679     chip->reset = devm_reset_control_get(&pdev->dev, NULL);
2680     if (IS_ERR(chip->reset)) {
2681         netdev_err(ndev, "Error: Get gmac rst failed\n");
2682         return -EINVAL;
2683     }
2684 
2685     chip->pinctrl = devm_pinctrl_get(&pdev->dev);
2686     if (IS_ERR(chip->pinctrl)) {
2687         netdev_err(ndev, "Error: Get Pin failed\n");
2688         return -EIO;
2689     }
2690 
2691     chip->gmac_clk = devm_clk_get(&pdev->dev, "gmac");
2692     if (IS_ERR(chip->gmac_clk)) {
2693         netdev_err(ndev, "Error: Get gmac clock failed\n");
2694         return PTR_ERR(chip->gmac_clk);
2695     }
2696 
2697     ret = of_get_phy_mode(np, &phy_mode);
2698     if (!ret) {
2699         chip->phy_interface = phy_mode;
2700         if (chip->phy_interface != PHY_INTERFACE_MODE_RGMII &&
2701         chip->phy_interface != PHY_INTERFACE_MODE_RMII) {
2702             netdev_err(ndev, "Error: Get gmac phy interface failed\n");
2703             return -EINVAL;
2704         }
2705     }
2706 
2707     ret = of_property_read_u32(np, "tx-delay", &chip->tx_delay);
2708     if (ret) {
2709         netdev_warn(ndev, "Warning: Get gmac tx-delay failed, use default 0\n");
2710         chip->tx_delay = 0;
2711     }
2712 
2713     ret = of_property_read_u32(np, "rx-delay", &chip->rx_delay);
2714     if (ret) {
2715         netdev_warn(ndev, "Warning: Get gmac rx-delay failed, use default 0\n");
2716         chip->rx_delay = 0;
2717     }
2718 
2719     chip->phy_node = of_parse_phandle(np, "phy-handle", 0);
2720     if (!chip->phy_node) {
2721         netdev_err(ndev, "Error: Get gmac phy-handle failed\n");
2722         return -EINVAL;
2723     }
2724 
2725 #ifdef CONFIG_AW_EPHY_AC300
2726     /*
2727      * If use Internal phy such as ac300,
2728      * change the phy_type to internal phy
2729      */
2730     chip->phy_type = SUNXI_INTERNAL_PHY;
2731     chip->ac300_np = of_parse_phandle(np, "ac300-phy-handle", 0);
2732     if (!chip->ac300_np) {
2733         netdev_err(ndev, "Error: Get gmac ac300-phy-handle failed\n");
2734         return -EINVAL;
2735     }
2736 
2737     ret = of_property_read_u32(np, "sunxi,pwm-channel", &chip->pwm_channel);
2738     if (ret) {
2739         netdev_err(ndev, "Error: Get ac300 pwm failed\n");
2740         return -EINVAL;
2741     }
2742 
2743     chip->ac300_pwm = pwm_request(chip->pwm_channel, NULL);
2744     if (IS_ERR_OR_NULL(chip->ac300_pwm)) {
2745         netdev_err(ndev, "Error: Get ac300 pwm failed\n");
2746         return -EINVAL;
2747     }
2748 #endif /* CONFIG_AW_EPHY_AC300 */
2749 
2750     ret = of_property_read_u32(np, "sunxi,phy-clk-type", &chip->phy_clk_type);
2751     if (ret) {
2752         netdev_err(ndev, "Error: Get gmac phy-clk-type failed\n");
2753         return -EINVAL;
2754     }
2755 
2756     if (chip->phy_clk_type == SUNXI_PHY_USE_CLK25M) {
2757         chip->phy25m_clk = devm_clk_get(&pdev->dev, "phy25m");
2758         if (IS_ERR_OR_NULL(chip->phy25m_clk)) {
2759             netdev_err(ndev, "Error: Get phy25m clk failed\n");
2760             return -EINVAL;
2761         }
2762     }
2763 
2764     for (i = 0; i < SUNXI_GMAC_POWER_CHAN_NUM; i++) {
2765         sprintf(power, "gmac-power%d", i);
2766         /* get gmac_supplyX voltage */
2767         sprintf(power_vol, "gmac-power%d-vol", i);
2768         if (!of_property_read_u32(np, power_vol, &value)) {
2769             chip->gmac_supply_vol[i] = value;
2770             netdev_dbg(ndev, "Info: Gmac_power_vol[%d] = %d\n", i, value);
2771         } else {
2772             chip->gmac_supply_vol[i] = 0;
2773         }
2774         chip->gmac_supply[i] = regulator_get(&pdev->dev, power);
2775 
2776         if (IS_ERR(chip->gmac_supply[i]))
2777             netdev_err(ndev, "Error: gmac-power%d get error\n", i);
2778         else
2779             netdev_dbg(ndev, "gmac-power%d get success\n", i);
2780     }
2781 
2782     return 0;
2783 }
2784 
2785 static void sunxi_gmac_resource_put(struct platform_device *pdev)
2786 {
2787 #ifdef CONFIG_AW_EPHY_AC300
2788     struct net_device *ndev = platform_get_drvdata(pdev);
2789     struct sunxi_gmac *chip = netdev_priv(ndev);
2790 
2791     pwm_free(chip->ac300_pwm);
2792 #endif /* CONFIG_AW_EPHY_AC300 */
2793 }
2794 
2795 static void sunxi_gmac_sysfs_create(struct device *dev)
2796 {
2797     device_create_file(dev, &dev_attr_gphy_test);
2798     device_create_file(dev, &dev_attr_mii_read);
2799     device_create_file(dev, &dev_attr_mii_write);
2800     device_create_file(dev, &dev_attr_loopback);
2801     device_create_file(dev, &dev_attr_extra_tx_stats);
2802     device_create_file(dev, &dev_attr_extra_rx_stats);
2803 }
2804 
2805 static void sunxi_gmac_sysfs_destroy(struct device *dev)
2806 {
2807     device_remove_file(dev, &dev_attr_gphy_test);
2808     device_remove_file(dev, &dev_attr_mii_read);
2809     device_remove_file(dev, &dev_attr_mii_write);
2810     device_remove_file(dev, &dev_attr_loopback);
2811     device_remove_file(dev, &dev_attr_extra_tx_stats);
2812     device_remove_file(dev, &dev_attr_extra_rx_stats);
2813 }
2814 
2815 /**
2816  * sunxi_gmac_probe - GMAC device probe
2817  * @pdev: The SUNXI GMAC platform device that we are probing
2818  *
2819  * Called when probing for GMAC device. We get details of instances and
2820  * resource information from platform init and register a network device
2821  * and allocate resources necessary for driver to perform
2822  *
2823  */
2824 static int sunxi_gmac_probe(struct platform_device *pdev)
2825 {
2826     int ret;
2827     struct net_device *ndev;
2828     struct sunxi_gmac *chip;
2829 
2830     dev_dbg(&pdev->dev, "%s() BEGIN\n", __func__);
2831 
2832     pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2833     pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
2834 
2835     ndev = alloc_etherdev(sizeof(struct sunxi_gmac));
2836     if (!ndev) {
2837         dev_err(&pdev->dev, "Error: Allocate network device failed\n");
2838         ret = -ENOMEM;
2839         goto alloc_etherdev_err;
2840     }
2841     SET_NETDEV_DEV(ndev, &pdev->dev);
2842 
2843     chip = netdev_priv(ndev);
2844     platform_set_drvdata(pdev, ndev);
2845 
2846     ret = sunxi_gmac_resource_get(pdev);
2847     if (ret) {
2848         dev_err(&pdev->dev, "Error: Get gmac hardware resource failed\n");
2849         goto resource_get_err;
2850     }
2851 
2852     ret = sunxi_gmac_hardware_init(pdev);
2853     if (ret) {
2854         dev_err(&pdev->dev, "Error: Init gmac hardware resource failed\n");
2855         goto hardware_init_err;
2856     }
2857 
2858     /*
2859      * Set up the netdevice:
2860      * fill in the netdevice base memory and ops,
2861      * then the netdevice ethtool ops
2862      */
2863     ether_setup(ndev);
2864     ndev->netdev_ops = &sunxi_gmac_netdev_ops;
2865     netdev_set_default_ethtool_ops(ndev, &sunxi_gmac_ethtool_ops);
2866     ndev->base_addr = (unsigned long)chip->base;
2867     chip->ndev = ndev;
2868     chip->dev = &pdev->dev;
2869 
2870     /* Fill in netdevice features and flags */
2871     ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
2872                 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_GRO;
2873     ndev->features |= ndev->hw_features;
2874     ndev->hw_features |= NETIF_F_LOOPBACK;
2875     ndev->priv_flags |= IFF_UNICAST_FLT;
2876     ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
2877 
2878     /* add napi poll method */
2879     netif_napi_add(ndev, &chip->napi, sunxi_gmac_poll, SUNXI_GMAC_BUDGET);
2880 
2881     spin_lock_init(&chip->universal_lock);
2882     spin_lock_init(&chip->tx_lock);
2883 
2884     ret = register_netdev(ndev);
2885     if (ret) {
2886         dev_err(&pdev->dev, "Error: Register %s failed\n", ndev->name);
2887         goto register_err;
2888     }
2889 
2890     /* Before opening the device, the mac address must be set */
2891     sunxi_gmac_check_addr(ndev, mac_str);
2892 
2893     ret = sunxi_gmac_dma_desc_init(ndev);
2894     if (ret) {
2895         dev_err(&pdev->dev, "Error: Init dma descriptor failed\n");
2896         goto init_dma_desc_err;
2897     }
2898 
2899     sunxi_gmac_sysfs_create(&pdev->dev);
2900 
2901     dev_dbg(&pdev->dev, "%s() SUCCESS\n", __func__);
2902 
2903     return 0;
2904 
2905 init_dma_desc_err:
2906     unregister_netdev(ndev);
2907 register_err:
2908     netif_napi_del(&chip->napi);
2909     sunxi_gmac_hardware_deinit(pdev);
2910 hardware_init_err:
2911     sunxi_gmac_resource_put(pdev);
2912 resource_get_err:
2913     platform_set_drvdata(pdev, NULL);
2914     free_netdev(ndev);
2915 alloc_etherdev_err:
2916     return ret;
2917 }
2918 
2919 static int sunxi_gmac_remove(struct platform_device *pdev)
2920 {
2921     struct net_device *ndev = platform_get_drvdata(pdev);
2922     struct sunxi_gmac *chip = netdev_priv(ndev);
2923 
2924     sunxi_gmac_sysfs_destroy(&pdev->dev);
2925     sunxi_gmac_dma_desc_deinit(chip);
2926     unregister_netdev(ndev);
2927     netif_napi_del(&chip->napi);
2928     sunxi_gmac_hardware_deinit(pdev);
2929     sunxi_gmac_resource_put(pdev);
2930     platform_set_drvdata(pdev, NULL);
2931     free_netdev(ndev);
2932     return 0;
2933 }
2934 
2935 static const struct of_device_id sunxi_gmac_of_match[] = {
2936     {.compatible = "allwinner,sunxi-gmac",},
2937     {},
2938 };
2939 MODULE_DEVICE_TABLE(of, sunxi_gmac_of_match);
2940 
2941 static struct platform_driver sunxi_gmac_driver = {
2942     .probe    = sunxi_gmac_probe,
2943     .remove = sunxi_gmac_remove,
2944     .driver = {
2945            .name = "sunxi-gmac",
2946            .owner = THIS_MODULE,
2947            .pm = &sunxi_gmac_pm_ops,
2948            .of_match_table = sunxi_gmac_of_match,
2949     },
2950 };
2951 module_platform_driver(sunxi_gmac_driver);
2952 
2953 #ifndef MODULE
2954 static int __init sunxi_gmac_set_mac_addr(char *str)
2955 {
2956     char *p = str;
2957 
2958     /*
2959      * mac address format: xx:xx:xx:xx:xx:xx
2960      * 18 bytes (MAC_ADDR_LEN) are copied so that
2961      * the trailing '\0' is included.
2962      */
2963     if (str && strlen(str))
2964         memcpy(mac_str, p, MAC_ADDR_LEN);
2965 
2966     return 0;
2967 }
2968 /* TODO: When more than one MAC is used,
2969  * parsing the mac address becomes a problem.
2970  * Maybe use separate parameters: mac0_addr=, mac1_addr=
2971  */
2972 __setup("mac_addr=", sunxi_gmac_set_mac_addr);
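/*
 * Illustrative usage (not from the original source): booting with
 * "mac_addr=36:c9:e3:50:fe:48" on the kernel command line copies the string
 * into mac_str, which sunxi_gmac_check_addr() later parses into
 * ndev->dev_addr when the driver probes.
 */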
2973 #endif /* MODULE */
2974 
2975 MODULE_DESCRIPTION("Allwinner GMAC driver");
2976 MODULE_AUTHOR("xuminghui <xuminghui@allwinnertech.com>");
2977 MODULE_LICENSE("Dual BSD/GPL");
2978 MODULE_VERSION(SUNXI_GMAC_MODULE_VERSION);
2979