// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

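/* Per-dump iteration state, stored in the netlink callback scratch area. */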
struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

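/* Fill one DEV message: ifindex plus the XDP, XDP RX metadata and XSK
 * feature flags advertised by the device.
 */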
static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD))
		goto err_cancel_msg;

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs))
			goto err_cancel_msg;
	}

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

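/* Multicast a device notification to NETDEV_NLGRP_MGMT listeners, if any. */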
static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

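/* DEV_GET doit: reply with the attributes of a single device. */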
int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_dev_fill(netdev, rsp, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			break;
	}
	rtnl_unlock();

	return err;
}

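/* Fill one NAPI message: id, ifindex, IRQ and the kthread pid if threaded. */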
static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
			const struct genl_info *info)
{
	void *hdr;
	pid_t pid;

	if (!(napi->dev->flags & IFF_UP))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
		goto nla_put_failure;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
		goto nla_put_failure;

	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
		goto nla_put_failure;

	if (napi->thread) {
		pid = task_pid_nr(napi->thread);
		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

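/* NAPI_GET doit: look up one NAPI instance by id and reply with its state. */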
int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	struct sk_buff *rsp;
	u32 napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();
	rcu_read_lock();

	napi = netdev_napi_by_id(genl_info_net(info), napi_id);
	if (napi) {
		err = netdev_nl_napi_fill_one(rsp, napi, info);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	rcu_read_unlock();
	rtnl_unlock();

	if (err) {
		goto err_free_msg;
	} else if (!rsp->len) {
		err = -ENOENT;
		goto err_free_msg;
	}

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

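/* Dump all NAPIs of one device, resuming below ctx->napi_id on later passes. */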
static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			const struct genl_info *info,
			struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		if (napi->napi_id < MIN_NAPI_ID)
			continue;
		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;
	}
	return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->napi_id = 0;
		}
	}
	rtnl_unlock();

	return err;
}

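/* Fill one QUEUE message; RX queues also report their dmabuf binding, if any. */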
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     rxq->napi->napi_id))
			goto nla_put_failure;

		binding = rxq->mp_params.mp_priv;
		if (binding &&
		    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
			goto nla_put_failure;

		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     txq->napi->napi_id))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err;

	if (!(netdev->flags & IFF_UP))
		return -ENOENT;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

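/* QUEUE_GET doit: validate the queue id and reply with a single queue. */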
int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

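/* Dump all RX then TX queues of one device, resuming from ctx->{rxq,txq}_idx. */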
static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
	}
	for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
	}

	return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}
	rtnl_unlock();

	return err;
}

#define NETDEV_STAT_NOT_SET		(~0ULL)

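/* Add one block of u64 counters into another; entries equal to
 * NETDEV_STAT_NOT_SET on either side are left untouched.
 */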
static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
	const u64 *add = _add;
	u64 *sum = _sum;

	while (size) {
		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
	if (value == NETDEV_STAT_NOT_SET)
		return 0;
	return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
		return -EMSGSIZE;
	return 0;
}

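/* Emit qstats for a single queue; a queue for which the driver reports
 * nothing is silently skipped.
 */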
static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
		      u32 q_type, int i, const struct genl_info *info)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		memset(&rx, 0xff, sizeof(rx));
		ops->get_queue_stats_rx(netdev, i, &rx);
		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_rx(rsp, &rx))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		memset(&tx, 0xff, sizeof(tx));
		ops->get_queue_stats_tx(netdev, i, &tx);
		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_tx(rsp, &tx))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(rsp, hdr);
	return 0;

nla_cancel:
	genlmsg_cancel(rsp, hdr);
	return 0;
nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	int i, err;

	if (!(netdev->flags & IFF_UP))
		return 0;

	i = ctx->rxq_idx;
	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
					    i, info);
		if (err)
			return err;
		ctx->rxq_idx = ++i;
	}
	i = ctx->txq_idx;
	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
					    i, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	ctx->rxq_idx = 0;
	ctx->txq_idx = 0;
	return 0;
}

/**
 * netdev_stat_queue_sum() - add up queue stats from range of queues
 * @netdev:	net_device
 * @rx_start:	index of the first Rx queue to query
 * @rx_end:	index after the last Rx queue (first *not* to query)
 * @rx_sum:	output Rx stats, should be already initialized
 * @tx_start:	index of the first Tx queue to query
 * @tx_end:	index after the last Tx queue (first *not* to query)
 * @tx_sum:	output Tx stats, should be already initialized
 *
 * Add stats from [start, end) range of queue IDs to *x_sum structs.
 * The sum structs must be already initialized. Usually this
 * helper is invoked from the .get_base_stats callbacks of drivers
 * to account for stats of disabled queues. In that case the ranges
 * are usually [netdev->real_num_*x_queues, netdev->num_*x_queues).
 */
void netdev_stat_queue_sum(struct net_device *netdev,
			   int rx_start, int rx_end,
			   struct netdev_queue_stats_rx *rx_sum,
			   int tx_start, int tx_end,
			   struct netdev_queue_stats_tx *tx_sum)
{
	const struct netdev_stat_ops *ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	int i;

	ops = netdev->stat_ops;

	for (i = rx_start; i < rx_end; i++) {
		memset(&rx, 0xff, sizeof(rx));
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		netdev_nl_stats_add(rx_sum, &rx, sizeof(rx));
	}
	for (i = tx_start; i < tx_end; i++) {
		memset(&tx, 0xff, sizeof(tx));
		if (ops->get_queue_stats_tx)
			ops->get_queue_stats_tx(netdev, i, &tx);
		netdev_nl_stats_add(tx_sum, &tx, sizeof(tx));
	}
}
EXPORT_SYMBOL(netdev_stat_queue_sum);
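
/*
 * A minimal usage sketch (not part of this file): a driver's .get_base_stats
 * callback folding in the counters of currently disabled queues, as the
 * kernel-doc above describes. The foo_get_base_stats() name and the choice
 * of which fields to report are hypothetical; real drivers pick the counters
 * they actually maintain.
 *
 *	static void foo_get_base_stats(struct net_device *dev,
 *				       struct netdev_queue_stats_rx *rx,
 *				       struct netdev_queue_stats_tx *tx)
 *	{
 *		// Report only the counters this driver maintains; fields left
 *		// at NETDEV_STAT_NOT_SET are omitted from the reply.
 *		rx->packets = 0;
 *		rx->bytes = 0;
 *		tx->packets = 0;
 *		tx->bytes = 0;
 *
 *		// Add stats of queues outside the currently active range.
 *		netdev_stat_queue_sum(dev,
 *				      dev->real_num_rx_queues,
 *				      dev->num_rx_queues, rx,
 *				      dev->real_num_tx_queues,
 *				      dev->num_tx_queues, tx);
 *	}
 */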
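/* Emit device-wide qstats: driver base stats plus the sum over all queues
 * currently in use.
 */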
static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
			  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum;
	struct netdev_queue_stats_tx tx_sum;
	void *hdr;

	/* Netdev can't guarantee any complete counters */
	if (!netdev->stat_ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));
	memset(&tx_sum, 0xff, sizeof(tx_sum));

	netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	/* The op was there, but nothing reported, don't bother */
	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum,
			      0, netdev->real_num_tx_queues, &tx_sum);

	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
	    netdev_nl_stats_write_tx(rsp, &tx_sum))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
			      struct sk_buff *skb, const struct genl_info *info,
			      struct netdev_nl_dump_ctx *ctx)
{
	if (!netdev->stat_ops)
		return 0;

	switch (scope) {
	case 0:
		return netdev_nl_stats_by_netdev(netdev, skb, info);
	case NETDEV_QSTATS_SCOPE_QUEUE:
		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
	}

	return -EINVAL;	/* Should not happen, per netlink policy */
}

int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	unsigned int ifindex;
	unsigned int scope;
	int err = 0;

	scope = 0;
	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

	ifindex = 0;
	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev && netdev->stat_ops) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
		} else {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			err = netdev ? -EOPNOTSUPP : -ENODEV;
		}
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
			if (err < 0)
				break;
		}
	}
	rtnl_unlock();

	return err;
}

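/* BIND_RX doit: bind a dmabuf to the requested RX queues of a device and
 * reply with the resulting binding id.
 */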
int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
	struct net_devmem_dmabuf_binding *binding;
	struct list_head *sock_binding_list;
	u32 ifindex, dmabuf_fd, rxq_idx;
	struct net_device *netdev;
	struct sk_buff *rsp;
	struct nlattr *attr;
	int rem, err = 0;
	void *hdr;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

	sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
					     NETLINK_CB(skb).sk);
	if (IS_ERR(sock_binding_list))
		return PTR_ERR(sock_binding_list);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_genlmsg_free;
	}

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (!netdev || !netif_device_present(netdev)) {
		err = -ENODEV;
		goto err_unlock;
	}

	if (dev_xdp_prog_count(netdev)) {
		NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
		err = -EEXIST;
		goto err_unlock;
	}

	binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
	if (IS_ERR(binding)) {
		err = PTR_ERR(binding);
		goto err_unlock;
	}

	nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		err = nla_parse_nested(
			tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
			netdev_queue_id_nl_policy, info->extack);
		if (err < 0)
			goto err_unbind;

		if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
		    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
			err = -EINVAL;
			goto err_unbind;
		}

		if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
			err = -EINVAL;
			goto err_unbind;
		}

		rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

		err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
						      info->extack);
		if (err)
			goto err_unbind;
	}

	list_add(&binding->list, sock_binding_list);

	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
	genlmsg_end(rsp, hdr);

	err = genlmsg_reply(rsp, info);
	if (err)
		goto err_unbind;

	rtnl_unlock();

	return 0;

err_unbind:
	net_devmem_unbind_dmabuf(binding);
err_unlock:
	rtnl_unlock();
err_genlmsg_free:
	nlmsg_free(rsp);
	return err;
}

void netdev_nl_sock_priv_init(struct list_head *priv)
{
	INIT_LIST_HEAD(priv);
}

void netdev_nl_sock_priv_destroy(struct list_head *priv)
{
	struct net_devmem_dmabuf_binding *binding;
	struct net_devmem_dmabuf_binding *temp;

	list_for_each_entry_safe(binding, temp, priv, list) {
		rtnl_lock();
		net_devmem_unbind_dmabuf(binding);
		rtnl_unlock();
	}
}

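/* Translate netdevice notifier events into netdev genetlink notifications. */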
static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		break;
	case NETDEV_UNREGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call	= netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);