/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
 */

#ifndef __DPAA_H
#define __DPAA_H

#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <net/xdp.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>

#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"

19 /* Number of prioritised traffic classes */
20 #define DPAA_TC_NUM		4
21 /* Number of Tx queues per traffic class */
22 #define DPAA_TC_TXQ_NUM		NR_CPUS
23 /* Total number of Tx queues */
24 #define DPAA_ETH_TXQ_NUM	(DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
25 
26 /* More detailed FQ types - used for fine-grained WQ assignments */
27 enum dpaa_fq_type {
28 	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
29 	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
30 	FQ_TYPE_RX_PCD,		/* Rx Parse Classify Distribute FQs */
31 	FQ_TYPE_TX,		/* "Real" Tx FQs */
32 	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
33 	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
34 	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
35 };
36 
37 struct dpaa_fq {
38 	struct qman_fq fq_base;
39 	struct list_head list;
40 	struct net_device *net_dev;
41 	bool init;
42 	u32 fqid;
43 	u32 flags;
44 	u16 channel;
45 	u8 wq;
46 	enum dpaa_fq_type fq_type;
47 	struct xdp_rxq_info xdp_rxq;
48 };
49 
50 struct dpaa_fq_cbs {
51 	struct qman_fq rx_defq;
52 	struct qman_fq tx_defq;
53 	struct qman_fq rx_errq;
54 	struct qman_fq tx_errq;
55 	struct qman_fq egress_ern;
56 };
57 
58 struct dpaa_priv;
59 
60 struct dpaa_bp {
61 	/* used in the DMA mapping operations */
62 	struct dpaa_priv *priv;
63 	/* current number of buffers in the buffer pool alloted to each CPU */
64 	int __percpu *percpu_count;
65 	/* all buffers allocated for this pool have this raw size */
66 	size_t raw_size;
67 	/* all buffers in this pool have this same usable size */
68 	size_t size;
69 	/* the buffer pools are initialized with config_count buffers for each
70 	 * CPU; at runtime the number of buffers per CPU is constantly brought
71 	 * back to this level
72 	 */
73 	u16 config_count;
74 	u8 bpid;
75 	struct bman_pool *pool;
76 	/* bpool can be seeded before use by this cb */
77 	int (*seed_cb)(struct dpaa_bp *);
78 	/* bpool can be emptied before freeing by this cb */
79 	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
80 	refcount_t refs;
81 };
82 
83 struct dpaa_rx_errors {
84 	u64 dme;		/* DMA Error */
85 	u64 fpe;		/* Frame Physical Error */
86 	u64 fse;		/* Frame Size Error */
87 	u64 phe;		/* Header Error */
88 };
89 
90 /* Counters for QMan ERN frames - one counter per rejection code */
91 struct dpaa_ern_cnt {
92 	u64 cg_tdrop;		/* Congestion group taildrop */
93 	u64 wred;		/* WRED congestion */
94 	u64 err_cond;		/* Error condition */
95 	u64 early_window;	/* Order restoration, frame too early */
96 	u64 late_window;	/* Order restoration, frame too late */
97 	u64 fq_tdrop;		/* FQ taildrop */
98 	u64 fq_retired;		/* FQ is retired */
99 	u64 orp_zero;		/* ORP disabled */
100 };
101 
102 struct dpaa_napi_portal {
103 	struct napi_struct napi;
104 	struct qman_portal *p;
105 	bool down;
106 	int xdp_act;
107 };
108 
109 struct dpaa_percpu_priv {
110 	struct net_device *net_dev;
111 	struct dpaa_napi_portal np;
112 	u64 in_interrupt;
113 	u64 tx_confirm;
114 	/* fragmented (non-linear) skbuffs received from the stack */
115 	u64 tx_frag_skbuffs;
116 	struct rtnl_link_stats64 stats;
117 	struct dpaa_rx_errors rx_errors;
118 	struct dpaa_ern_cnt ern_cnt;
119 };
120 
121 struct dpaa_buffer_layout {
122 	u16 priv_data_size;
123 };
124 
125 /* Information to be used on the Tx confirmation path. Stored just
126  * before the start of the transmit buffer. Maximum size allowed
127  * is DPAA_TX_PRIV_DATA_SIZE bytes.
128  */
129 struct dpaa_eth_swbp {
130 	struct sk_buff *skb;
131 	struct xdp_frame *xdpf;
132 };
133 
134 struct dpaa_priv {
135 	struct dpaa_percpu_priv __percpu *percpu_priv;
136 	struct dpaa_bp *dpaa_bp;
137 	/* Store here the needed Tx headroom for convenience and speed
138 	 * (even though it can be computed based on the fields of buf_layout)
139 	 */
140 	u16 tx_headroom;
141 	struct net_device *net_dev;
142 	struct mac_device *mac_dev;
143 	struct device *rx_dma_dev;
144 	struct device *tx_dma_dev;
145 	struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
146 	struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
147 
148 	u16 channel;
149 	struct list_head dpaa_fq_list;
150 
151 	u8 num_tc;
152 	bool keygen_in_use;
153 	u32 msg_enable;	/* net_device message level */
154 
155 	struct {
156 		/* All egress queues to a given net device belong to one
157 		 * (and the same) congestion group.
158 		 */
159 		struct qman_cgr cgr;
160 		/* If congested, when it began. Used for performance stats. */
161 		u32 congestion_start_jiffies;
162 		/* Number of jiffies the Tx port was congested. */
163 		u32 congested_jiffies;
164 		/* Counter for the number of times the CGR
165 		 * entered congestion state
166 		 */
167 		u32 cgr_congested_count;
168 	} cgr_data;
169 	/* Use a per-port CGR for ingress traffic. */
170 	bool use_ingress_cgr;
171 	struct qman_cgr ingress_cgr;
172 
173 	struct dpaa_buffer_layout buf_layout[2];
174 	u16 rx_headroom;
175 
176 	bool tx_tstamp; /* Tx timestamping enabled */
177 	bool rx_tstamp; /* Rx timestamping enabled */
178 
179 	struct bpf_prog *xdp_prog;
180 };
181 
182 /* from dpaa_ethtool.c */
183 extern const struct ethtool_ops dpaa_ethtool_ops;
184 
185 /* from dpaa_eth_sysfs.c */
186 void dpaa_eth_sysfs_remove(struct device *dev);
187 void dpaa_eth_sysfs_init(struct device *dev);
188 #endif	/* __DPAA_H */
189