/* SPDX-License-Identifier: GPL-2.0+ */
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#ifndef __SPARX5_MAIN_H__
#define __SPARX5_MAIN_H__

#include <linux/types.h>
#include <linux/phy/phy.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/bitmap.h>
#include <linux/phylink.h>
#include <linux/hrtimer.h>

/* Target chip type */
enum spx5_target_chiptype {
	SPX5_TARGET_CT_7546    = 0x7546,  /* SparX-5-64  Enterprise */
	SPX5_TARGET_CT_7549    = 0x7549,  /* SparX-5-90  Enterprise */
	SPX5_TARGET_CT_7552    = 0x7552,  /* SparX-5-128 Enterprise */
	SPX5_TARGET_CT_7556    = 0x7556,  /* SparX-5-160 Enterprise */
	SPX5_TARGET_CT_7558    = 0x7558,  /* SparX-5-200 Enterprise */
	SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
	SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
	SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
	SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
	SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
};

enum sparx5_port_max_tags {
	SPX5_PORT_MAX_TAGS_NONE,  /* No extra tags allowed */
	SPX5_PORT_MAX_TAGS_ONE,   /* Single tag allowed */
	SPX5_PORT_MAX_TAGS_TWO    /* Single and double tag allowed */
};

enum sparx5_vlan_port_type {
	SPX5_VLAN_PORT_TYPE_UNAWARE, /* VLAN unaware port */
	SPX5_VLAN_PORT_TYPE_C,       /* C-port */
	SPX5_VLAN_PORT_TYPE_S,       /* S-port */
	SPX5_VLAN_PORT_TYPE_S_CUSTOM /* S-port using custom type */
};

#define SPX5_PORTS             65
#define SPX5_PORT_CPU          (SPX5_PORTS)  /* Next port is CPU port */
#define SPX5_PORT_CPU_0        (SPX5_PORT_CPU + 0) /* CPU Port 65 */
#define SPX5_PORT_CPU_1        (SPX5_PORT_CPU + 1) /* CPU Port 66 */
#define SPX5_PORT_VD0          (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */
#define SPX5_PORT_VD1          (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */
#define SPX5_PORT_VD2          (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP */
#define SPX5_PORTS_ALL         (SPX5_PORT_CPU + 5) /* Total number of ports */

#define PGID_BASE              SPX5_PORTS /* Starts after port PGIDs */
#define PGID_UC_FLOOD          (PGID_BASE + 0)
#define PGID_MC_FLOOD          (PGID_BASE + 1)
#define PGID_IPV4_MC_DATA      (PGID_BASE + 2)
#define PGID_IPV4_MC_CTRL      (PGID_BASE + 3)
#define PGID_IPV6_MC_DATA      (PGID_BASE + 4)
#define PGID_IPV6_MC_CTRL      (PGID_BASE + 5)
#define PGID_BCAST             (PGID_BASE + 6)
#define PGID_CPU               (PGID_BASE + 7)

#define IFH_LEN                9 /* 36 bytes */
#define NULL_VID               0
#define SPX5_MACT_PULL_DELAY   (2 * HZ)
#define SPX5_STATS_CHECK_DELAY (1 * HZ)
#define SPX5_PRIOS             8     /* Number of priority queues */
#define SPX5_BUFFER_CELL_SZ    184   /* Cell size */
#define SPX5_BUFFER_MEMORY     4194280 /* 22795 words * 184 bytes */

#define XTR_QUEUE     0
#define INJ_QUEUE     0

#define FDMA_DCB_MAX			64
#define FDMA_RX_DCB_MAX_DBS		15
#define FDMA_TX_DCB_MAX_DBS		1

struct sparx5;

struct sparx5_db_hw {
	u64 dataptr;
	u64 status;
};

struct sparx5_rx_dcb_hw {
	u64 nextptr;
	u64 info;
	struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS];
};

struct sparx5_tx_dcb_hw {
	u64 nextptr;
	u64 info;
	struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS];
};

/* Frame DMA receive state:
 * For each DB there is an SKB, and the skb data pointer is mapped in
 * the DB. Once a frame is received, the skb is given to the upper layers
 * and a new skb is added to the DCB.
 * When db_index reaches FDMA_RX_DCB_MAX_DBS, the DB is reused.
 */
struct sparx5_rx {
	struct sparx5_rx_dcb_hw *dcb_entries;
	struct sparx5_rx_dcb_hw *last_entry;
	struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
	int db_index;
	int dcb_index;
	dma_addr_t dma;
	struct napi_struct napi;
	u32 channel_id;
	struct net_device *ndev;
	u64 packets;
};
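
/* Illustrative sketch only, not part of the driver: given the layout above,
 * the skb for the slot currently being serviced would be looked up as
 * rx->skb[rx->dcb_index][rx->db_index], and walking past the last DB of a
 * DCB wraps both indices. The two example helpers below merely document that
 * assumption; the actual bookkeeping lives in sparx5_fdma.c.
 */
static inline struct sk_buff *sparx5_rx_cur_skb_example(struct sparx5_rx *rx)
{
	return rx->skb[rx->dcb_index][rx->db_index];
}

static inline void sparx5_rx_advance_example(struct sparx5_rx *rx)
{
	if (++rx->db_index == FDMA_RX_DCB_MAX_DBS) {
		rx->db_index = 0;
		rx->dcb_index = (rx->dcb_index + 1) % FDMA_DCB_MAX;
	}
}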

/* Frame DMA transmit state:
 * DCBs are chained using the DCB's nextptr field.
 */
struct sparx5_tx {
	struct sparx5_tx_dcb_hw *curr_entry;
	struct sparx5_tx_dcb_hw *first_entry;
	struct list_head db_list;
	dma_addr_t dma;
	u32 channel_id;
	u64 packets;
	u64 dropped;
};

struct sparx5_port_config {
	phy_interface_t portmode;
	u32 bandwidth;
	int speed;
	int duplex;
	enum phy_media media;
	bool inband;
	bool power_down;
	bool autoneg;
	bool serdes_reset;
	u32 pause;
	u32 pause_adv;
	phy_interface_t phy_mode;
	u32 sd_sgpio;
};

struct sparx5_port {
	struct net_device *ndev;
	struct sparx5 *sparx5;
	struct device_node *of_node;
	struct phy *serdes;
	struct sparx5_port_config conf;
	struct phylink_config phylink_config;
	struct phylink *phylink;
	struct phylink_pcs phylink_pcs;
	u16 portno;
	/* Ingress default VLAN (pvid) */
	u16 pvid;
	/* Egress default VLAN (vid) */
	u16 vid;
	bool signd_internal;
	bool signd_active_high;
	bool signd_enable;
	bool flow_control;
	enum sparx5_port_max_tags max_vlan_tags;
	enum sparx5_vlan_port_type vlan_type;
	u32 custom_etype;
	u32 ifh[IFH_LEN];
	bool vlan_aware;
	struct hrtimer inj_timer;
};

enum sparx5_core_clockfreq {
	SPX5_CORE_CLOCK_DEFAULT,  /* Defaults to the highest supported frequency */
	SPX5_CORE_CLOCK_250MHZ,   /* 250 MHz core clock frequency */
	SPX5_CORE_CLOCK_500MHZ,   /* 500 MHz core clock frequency */
	SPX5_CORE_CLOCK_625MHZ,   /* 625 MHz core clock frequency */
};

struct sparx5 {
	struct platform_device *pdev;
	struct device *dev;
	u32 chip_id;
	enum spx5_target_chiptype target_ct;
	void __iomem *regs[NUM_TARGETS];
	int port_count;
	struct mutex lock; /* MAC reg lock */
	/* port structures are in net device */
	struct sparx5_port *ports[SPX5_PORTS];
	enum sparx5_core_clockfreq coreclock;
	/* Statistics */
	u32 num_stats;
	u32 num_ethtool_stats;
	const char * const *stats_layout;
	u64 *stats;
	/* Workqueue for reading stats */
	struct mutex queue_stats_lock;
	struct delayed_work stats_work;
	struct workqueue_struct *stats_queue;
	/* Notifiers */
	struct notifier_block netdevice_nb;
	struct notifier_block switchdev_nb;
	struct notifier_block switchdev_blocking_nb;
	/* Switch state */
	u8 base_mac[ETH_ALEN];
	/* Associated bridge device (when bridged) */
	struct net_device *hw_bridge_dev;
	/* Bridged interfaces */
	DECLARE_BITMAP(bridge_mask, SPX5_PORTS);
	DECLARE_BITMAP(bridge_fwd_mask, SPX5_PORTS);
	DECLARE_BITMAP(bridge_lrn_mask, SPX5_PORTS);
	DECLARE_BITMAP(vlan_mask[VLAN_N_VID], SPX5_PORTS);
	/* SW MAC table */
	struct list_head mact_entries;
	/* mac table list (mact_entries) mutex */
	struct mutex mact_lock;
	struct delayed_work mact_work;
	struct workqueue_struct *mact_queue;
	/* Board specifics */
	bool sd_sgpio_remapping;
	/* Register based inj/xtr */
	int xtr_irq;
	/* Frame DMA */
	int fdma_irq;
	struct sparx5_rx rx;
	struct sparx5_tx tx;
};

/* sparx5_switchdev.c */
int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);

/* sparx5_packet.c */
struct frame_info {
	int src_port;
};

void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
void sparx5_ifh_parse(u32 *ifh, struct frame_info *info);
irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);

/* sparx5_fdma.c */
int sparx5_fdma_start(struct sparx5 *sparx5);
int sparx5_fdma_stop(struct sparx5 *sparx5);
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb);
irqreturn_t sparx5_fdma_handler(int irq, void *args);

/* sparx5_mactable.c */
void sparx5_mact_pull_work(struct work_struct *work);
int sparx5_mact_learn(struct sparx5 *sparx5, int port,
		      const unsigned char mac[ETH_ALEN], u16 vid);
bool sparx5_mact_getnext(struct sparx5 *sparx5,
			 unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
int sparx5_mact_forget(struct sparx5 *sparx5,
		       const unsigned char mac[ETH_ALEN], u16 vid);
int sparx5_add_mact_entry(struct sparx5 *sparx5,
			  struct sparx5_port *port,
			  const unsigned char *addr, u16 vid);
int sparx5_del_mact_entry(struct sparx5 *sparx5,
			  const unsigned char *addr,
			  u16 vid);
int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr);
int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr);
void sparx5_set_ageing(struct sparx5 *sparx5, int msecs);
void sparx5_mact_init(struct sparx5 *sparx5);

/* sparx5_vlan.c */
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
void sparx5_update_fwd(struct sparx5 *sparx5);
void sparx5_vlan_init(struct sparx5 *sparx5);
void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno);
int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
			bool untagged);
int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid);
void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);

/* sparx5_calendar.c */
int sparx5_config_auto_calendar(struct sparx5 *sparx5);
int sparx5_config_dsm_calendar(struct sparx5 *sparx5);

/* sparx5_ethtool.c */
void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
int sparx_stats_init(struct sparx5 *sparx5);

/* sparx5_netdev.c */
bool sparx5_netdevice_check(const struct net_device *dev);
struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
int sparx5_register_netdevs(struct sparx5 *sparx5);
void sparx5_destroy_netdevs(struct sparx5 *sparx5);
void sparx5_unregister_netdevs(struct sparx5 *sparx5);

/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
	switch (cclock) {
	case SPX5_CORE_CLOCK_250MHZ:
		return 4000;
	case SPX5_CORE_CLOCK_500MHZ:
		return 2000;
	case SPX5_CORE_CLOCK_625MHZ:
	default:
		return 1600;
	}
}
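
/* Worked example (illustrative): the constants above follow directly from
 * period_ps = 10^12 / f_Hz, i.e. 10^12 / 250e6 = 4000 ps,
 * 10^12 / 500e6 = 2000 ps and 10^12 / 625e6 = 1600 ps.
 */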

static inline bool sparx5_is_baser(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_5GBASER ||
	       interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_25GBASER;
}

extern const struct phylink_mac_ops sparx5_phylink_mac_ops;
extern const struct phylink_pcs_ops sparx5_phylink_pcs_ops;
extern const struct ethtool_ops sparx5_ethtool_ops;

/* Calculate raw offset */
static inline __pure int spx5_offset(int id, int tinst, int tcnt,
				     int gbase, int ginst,
				     int gcnt, int gwidth,
				     int raddr, int rinst,
				     int rcnt, int rwidth)
{
	WARN_ON((tinst) >= tcnt);
	WARN_ON((ginst) >= gcnt);
	WARN_ON((rinst) >= rcnt);
	return gbase + ((ginst) * gwidth) +
		raddr + ((rinst) * rwidth);
}

/* Read, write and modify register contents.
 * The register definition macros provide the arguments, starting with the id.
 */
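
/* Usage sketch (illustrative only; SOME_REG is a hypothetical register macro,
 * not one defined by this file): such a macro is expected to expand to the
 * full id/tinst/.../rwidth argument list consumed by the accessors below, so
 * callers would read, write and update a field roughly like this:
 *
 *	val = spx5_rd(sparx5, SOME_REG(ridx));
 *	spx5_wr(val, sparx5, SOME_REG(ridx));
 *	spx5_rmw(field_value, field_mask, sparx5, SOME_REG(ridx));
 */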
static inline void __iomem *spx5_addr(void __iomem *base[],
				      int id, int tinst, int tcnt,
				      int gbase, int ginst,
				      int gcnt, int gwidth,
				      int raddr, int rinst,
				      int rcnt, int rwidth)
{
	WARN_ON((tinst) >= tcnt);
	WARN_ON((ginst) >= gcnt);
	WARN_ON((rinst) >= rcnt);
	return base[id + (tinst)] +
		gbase + ((ginst) * gwidth) +
		raddr + ((rinst) * rwidth);
}

static inline void __iomem *spx5_inst_addr(void __iomem *base,
					   int gbase, int ginst,
					   int gcnt, int gwidth,
					   int raddr, int rinst,
					   int rcnt, int rwidth)
{
	WARN_ON((ginst) >= gcnt);
	WARN_ON((rinst) >= rcnt);
	return base +
		gbase + ((ginst) * gwidth) +
		raddr + ((rinst) * rwidth);
}

static inline u32 spx5_rd(struct sparx5 *sparx5, int id, int tinst, int tcnt,
			  int gbase, int ginst, int gcnt, int gwidth,
			  int raddr, int rinst, int rcnt, int rwidth)
{
	return readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
			       gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}

static inline u32 spx5_inst_rd(void __iomem *iomem, int id, int tinst, int tcnt,
			       int gbase, int ginst, int gcnt, int gwidth,
			       int raddr, int rinst, int rcnt, int rwidth)
{
	return readl(spx5_inst_addr(iomem, gbase, ginst,
				    gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}

static inline void spx5_wr(u32 val, struct sparx5 *sparx5,
			   int id, int tinst, int tcnt,
			   int gbase, int ginst, int gcnt, int gwidth,
			   int raddr, int rinst, int rcnt, int rwidth)
{
	writel(val, spx5_addr(sparx5->regs, id, tinst, tcnt,
			      gbase, ginst, gcnt, gwidth,
			      raddr, rinst, rcnt, rwidth));
}

static inline void spx5_inst_wr(u32 val, void __iomem *iomem,
				int id, int tinst, int tcnt,
				int gbase, int ginst, int gcnt, int gwidth,
				int raddr, int rinst, int rcnt, int rwidth)
{
	writel(val, spx5_inst_addr(iomem,
				   gbase, ginst, gcnt, gwidth,
				   raddr, rinst, rcnt, rwidth));
}

static inline void spx5_rmw(u32 val, u32 mask, struct sparx5 *sparx5,
			    int id, int tinst, int tcnt,
			    int gbase, int ginst, int gcnt, int gwidth,
			    int raddr, int rinst, int rcnt, int rwidth)
{
	u32 nval;

	nval = readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
			       gcnt, gwidth, raddr, rinst, rcnt, rwidth));
	nval = (nval & ~mask) | (val & mask);
	writel(nval, spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
			       gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}

static inline void spx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem,
				 int id, int tinst, int tcnt,
				 int gbase, int ginst, int gcnt, int gwidth,
				 int raddr, int rinst, int rcnt, int rwidth)
{
	u32 nval;

	nval = readl(spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
				    rinst, rcnt, rwidth));
	nval = (nval & ~mask) | (val & mask);
	writel(nval, spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
				    rinst, rcnt, rwidth));
}

static inline void __iomem *spx5_inst_get(struct sparx5 *sparx5, int id, int tinst)
{
	return sparx5->regs[id + tinst];
}

static inline void __iomem *spx5_reg_get(struct sparx5 *sparx5,
					 int id, int tinst, int tcnt,
					 int gbase, int ginst, int gcnt, int gwidth,
					 int raddr, int rinst, int rcnt, int rwidth)
{
	return spx5_addr(sparx5->regs, id, tinst, tcnt,
			 gbase, ginst, gcnt, gwidth,
			 raddr, rinst, rcnt, rwidth);
}

#endif	/* __SPARX5_MAIN_H__ */