/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/linux/if_team.h - Network team device driver header
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 */
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_

#include <linux/netpoll.h>
#include <net/sch_generic.h>
#include <linux/types.h>
#include <uapi/linux/if_team.h>

struct team_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_dropped;
	u32			tx_dropped;
	u32			rx_nohandler;
};

struct team;

struct team_port {
	struct net_device *dev;
	struct hlist_node hlist; /* node in enabled ports hash list */
	struct list_head list; /* node in ordinary list */
	struct team *team;
	int index; /* index of enabled port. If disabled, it's set to -1 */

	bool linkup; /* either state.linkup or user.linkup */

	struct {
		bool linkup;
		u32 speed;
		u8 duplex;
	} state;

	/* Values set by userspace */
	struct {
		bool linkup;
		bool linkup_enabled;
	} user;

	/* Custom gennetlink interface related flags */
	bool changed;
	bool removed;

	/*
	 * A place for storing original values of the device before it
	 * became a port.
	 */
	struct {
		unsigned char dev_addr[MAX_ADDR_LEN];
		unsigned int mtu;
	} orig;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *np;
#endif

	s32 priority; /* lower number ~ higher priority */
	u16 queue_id;
	struct list_head qom_list; /* node in queue override mapping list */
	struct rcu_head	rcu;
	long mode_priv[0];
};

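/*
 * A port device's rx_handler_data points at its struct team_port, so the
 * lookup below is just an RCU dereference; callers must hold rcu_read_lock()
 * (see team_port_dev_txable() for an example).
 */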
static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static inline bool team_port_enabled(struct team_port *port)
{
	return port->index != -1;
}

static inline bool team_port_txable(struct team_port *port)
{
	return port->linkup && team_port_enabled(port);
}

static inline bool team_port_dev_txable(const struct net_device *port_dev)
{
	struct team_port *port;
	bool txable;

	rcu_read_lock();
	port = team_port_get_rcu(port_dev);
	txable = port ? team_port_txable(port) : false;
	rcu_read_unlock();

	return txable;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
	struct netpoll *np = port->np;

	if (np)
		netpoll_send_skb(np, skb);
}
#else
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
}
#endif

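/*
 * Callbacks provided by a team mode (e.g. "roundrobin", "activebackup",
 * "broadcast", "loadbalance"). receive() runs in the port's RX path and its
 * rx_handler_result_t return value tells the stack what was done with the
 * skb; transmit() picks the outgoing port(s) and returns true if the skb was
 * handed off. The remaining hooks notify the mode about team and port
 * lifecycle and state changes.
 */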
struct team_mode_ops {
	int (*init)(struct team *team);
	void (*exit)(struct team *team);
	rx_handler_result_t (*receive)(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb);
	bool (*transmit)(struct team *team, struct sk_buff *skb);
	int (*port_enter)(struct team *team, struct team_port *port);
	void (*port_leave)(struct team *team, struct team_port *port);
	void (*port_change_dev_addr)(struct team *team, struct team_port *port);
	void (*port_enabled)(struct team *team, struct team_port *port);
	void (*port_disabled)(struct team *team, struct team_port *port);
};

extern int team_modeop_port_enter(struct team *team, struct team_port *port);
extern void team_modeop_port_change_dev_addr(struct team *team,
					     struct team_port *port);

enum team_option_type {
	TEAM_OPTION_TYPE_U32,
	TEAM_OPTION_TYPE_STRING,
	TEAM_OPTION_TYPE_BINARY,
	TEAM_OPTION_TYPE_BOOL,
	TEAM_OPTION_TYPE_S32,
};

struct team_option_inst_info {
	u32 array_index;
	struct team_port *port; /* != NULL if per-port */
};

struct team_gsetter_ctx {
	union {
		u32 u32_val;
		const char *str_val;
		struct {
			const void *ptr;
			u32 len;
		} bin_val;
		bool bool_val;
		s32 s32_val;
	} data;
	struct team_option_inst_info *info;
};

struct team_option {
	struct list_head list;
	const char *name;
	bool per_port;
	unsigned int array_size; /* != 0 means the option is an array */
	enum team_option_type type;
	int (*init)(struct team *team, struct team_option_inst_info *info);
	int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
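
/*
 * Illustrative sketch, not part of this header: a mode typically exposes its
 * tunables as an array of struct team_option and registers it with
 * team_options_register() (declared further below). All example_* identifiers
 * are hypothetical.
 *
 *	static int example_count_get(struct team *team,
 *				     struct team_gsetter_ctx *ctx)
 *	{
 *		ctx->data.u32_val = example_priv(team)->count;
 *		return 0;
 *	}
 *
 *	static const struct team_option example_options[] = {
 *		{
 *			.name	= "example_count",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= example_count_get,
 *			.setter	= example_count_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, example_options,
 *				    ARRAY_SIZE(example_options));
 */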

extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
extern void team_options_change_check(struct team *team);

struct team_mode {
	const char *kind;
	struct module *owner;
	size_t priv_size;
	size_t port_priv_size;
	const struct team_mode_ops *ops;
	enum netdev_lag_tx_type lag_tx_type;
};

#define TEAM_PORT_HASHBITS 4
#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)

#define TEAM_MODE_PRIV_LONGS 4
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)

struct team {
	struct net_device *dev; /* associated netdevice */
	struct team_pcpu_stats __percpu *pcpu_stats;

	const struct header_ops *header_ops_cache;

	struct mutex lock; /* used for overall locking, e.g. port lists write */

	/*
	 * List of enabled ports and their count
	 */
	int en_port_count;
	struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];

	struct list_head port_list; /* list of all ports */

	struct list_head option_list;
	struct list_head option_inst_list; /* list of option instances */

	const struct team_mode *mode;
	struct team_mode_ops ops;
	bool user_carrier_enabled;
	bool queue_override_enabled;
	struct list_head *qom_lists; /* array of queue override mapping lists */
	bool port_mtu_change_allowed;
	bool notifier_ctx;
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} notify_peers;
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} mcast_rejoin;
	struct lock_class_key team_lock_key;
	long mode_priv[TEAM_MODE_PRIV_LONGS];
};

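/*
 * Transmit an skb through a specific port device: restore the queue mapping
 * recorded for the slave device in qdisc_skb_cb(), retarget the skb at the
 * port's netdevice, and either hand it to netpoll (when netpoll TX is running
 * on the team device) or queue it with dev_queue_xmit().
 */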
static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	skb->dev = port->dev;
	if (unlikely(netpoll_tx_running(team->dev))) {
		team_netpoll_send_skb(port, skb);
		return 0;
	}
	return dev_queue_xmit(skb);
}

static inline struct hlist_head *team_port_index_hash(struct team *team,
						      int port_index)
{
	return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
}

static inline struct team_port *team_get_port_by_index(struct team *team,
						       int port_index)
{
	struct team_port *port;
	struct hlist_head *head = team_port_index_hash(team, port_index);

	hlist_for_each_entry(port, head, hlist)
		if (port->index == port_index)
			return port;
	return NULL;
}

static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
	int en_port_count = READ_ONCE(team->en_port_count);

	if (unlikely(!en_port_count))
		return 0;
	return num % en_port_count;
}

static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
							   int port_index)
{
	struct team_port *port;
	struct hlist_head *head = team_port_index_hash(team, port_index);

	hlist_for_each_entry_rcu(port, head, hlist)
		if (port->index == port_index)
			return port;
	return NULL;
}

static inline struct team_port *
team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
{
	struct team_port *cur;

	if (likely(team_port_txable(port)))
		return port;
	cur = port;
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;
}

extern int team_options_register(struct team *team,
				 const struct team_option *option,
				 size_t option_count);
extern void team_options_unregister(struct team *team,
				    const struct team_option *option,
				    size_t option_count);
extern int team_mode_register(const struct team_mode *mode);
extern void team_mode_unregister(const struct team_mode *mode);

#define TEAM_DEFAULT_NUM_TX_QUEUES 16
#define TEAM_DEFAULT_NUM_RX_QUEUES 16

#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind)
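
/*
 * Illustrative sketch, not part of this header: a mode module fills in a
 * struct team_mode and registers it on module init. All example_* identifiers
 * below are hypothetical.
 *
 *	static const struct team_mode_ops example_mode_ops = {
 *		.transmit	= example_transmit,
 *		.port_enter	= example_port_enter,
 *	};
 *
 *	static const struct team_mode example_mode = {
 *		.kind		= "example",
 *		.owner		= THIS_MODULE,
 *		.priv_size	= sizeof(struct example_priv),
 *		.ops		= &example_mode_ops,
 *	};
 *
 *	static int __init example_init_module(void)
 *	{
 *		return team_mode_register(&example_mode);
 *	}
 *	module_init(example_init_module);
 *
 *	static void __exit example_cleanup_module(void)
 *	{
 *		team_mode_unregister(&example_mode);
 *	}
 *	module_exit(example_cleanup_module);
 *
 *	MODULE_ALIAS_TEAM_MODE("example");
 */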

#endif /* _LINUX_IF_TEAM_H_ */