1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/netdevice.h>
3 #include <linux/proc_fs.h>
4 #include <linux/seq_file.h>
5 #include <net/wext.h>
6
7 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
8
9 #define get_bucket(x) ((x) >> BUCKET_SPACE)
10 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
11 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
12
13 extern struct list_head ptype_all __read_mostly;
14 extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
15
/* Return the n-th device (1-based offset taken from the low bits of
 * *pos) within the dev_index_head bucket selected by the high bits of
 * *pos, or NULL when the bucket has fewer entries than that.
 * Caller must hold rcu_read_lock().
 */
static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned int wanted = get_offset(*pos);
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct hlist_head *head;
	unsigned int seen = 0;

	head = &net->dev_index_head[get_bucket(*pos)];
	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (++seen == wanted)
			return dev;

	return NULL;
}
31
/*
 * Find the device at (or after) the position encoded in *pos: bucket
 * index in the high bits, 1-based offset within the bucket in the low
 * bits (see get_bucket()/get_offset()/set_bucket_offset()).  Each time
 * a bucket is exhausted, *pos is advanced to entry 1 of the next
 * bucket, so the updated *pos always names the returned device.
 * Returns NULL once all NETDEV_HASHENTRIES buckets are scanned.
 * Caller must hold rcu_read_lock().
 */
static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net_device *dev;
	unsigned int bucket;

	do {
		dev = dev_from_same_bucket(seq, pos);
		if (dev)
			return dev;

		/* Current bucket exhausted: restart at entry 1 of the next. */
		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < NETDEV_HASHENTRIES);

	return NULL;
}
48
/*
 * This is invoked by the /proc filesystem handler to display a device
 * in detail.  Position 0 yields SEQ_START_TOKEN (the header line);
 * any other position is decoded as a bucket/offset pair.  Takes the
 * RCU read lock, released in dev_seq_stop().
 */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	/* Reject positions past the last hash bucket (e.g. a large lseek). */
	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
		return NULL;

	return dev_from_bucket(seq, pos);
}
65
/* Advance to the next device; dev_from_bucket() skips empty buckets
 * and rewrites *pos to match the device it returns.
 */
static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_from_bucket(seq, pos);
}
71
/* Drop the RCU read lock taken in dev_seq_start(). */
static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
77
/* Emit one /proc/net/dev line for @dev.  The column layout and the
 * aggregation of the error counters (drop includes missed, frame
 * combines length/over/crc/frame, carrier combines four tx error
 * classes) are long-standing userspace ABI — do not change them.
 */
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}
101
/*
 * Called from the PROCfs module. This now uses the new arbitrary sized
 * /proc/net interface to create /proc/net/dev.  The start token prints
 * the two header lines; every other element is a net_device whose
 * stats line is emitted by dev_seq_printf_stats().
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}
118
softnet_backlog_len(struct softnet_data * sd)119 static u32 softnet_backlog_len(struct softnet_data *sd)
120 {
121 return skb_queue_len_lockless(&sd->input_pkt_queue) +
122 skb_queue_len_lockless(&sd->process_queue);
123 }
124
/* Return the softnet_data of the first online CPU with id >= *pos,
 * advancing *pos to that CPU, or NULL when no online CPU remains.
 */
static struct softnet_data *softnet_get_online(loff_t *pos)
{
	while (*pos < nr_cpu_ids) {
		if (cpu_online(*pos))
			return &per_cpu(softnet_data, *pos);
		++*pos;
	}
	return NULL;
}
137
/* seq_file start: position directly indexes online CPUs (no header
 * token), so just resolve *pos to the next online CPU's softnet_data.
 */
static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}
142
/* Advance to the softnet_data of the next online CPU. */
static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}
148
/* Nothing to release: per-cpu softnet_data needs no lock to read here. */
static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}
152
/* Emit one /proc/net/softnet_stat line for a CPU's softnet_data.
 * Several columns are fixed zeros kept only for layout compatibility
 * with historic fields (fastroute, cpu_collision).
 */
static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;
	unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl)
		flow_limit_count = fl->count;
	rcu_read_unlock();
#endif

	/* The last column is the CPU id owning this sd.  Since offline
	 * CPUs are not displayed, it would otherwise not be trivial for
	 * user-space to map each line to a specific CPU.  seq->index
	 * tracks the iterator position, which softnet_get_online() kept
	 * equal to the CPU id.
	 */
	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0,	/* was cpu_collision */
		   sd->received_rps, flow_limit_count,
		   softnet_backlog_len(sd), (int)seq->index);
	return 0;
}
181
/* seq_file iterator for /proc/net/dev. */
static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};
188
/* seq_file iterator for /proc/net/softnet_stat. */
static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};
195
/*
 * Linear lookup of the @pos-th packet_type for /proc/net/ptype.  The
 * iteration order defines the file's order and must match
 * ptype_seq_next(): first every device's ptype_all list (devices in
 * this netns), then the global ptype_all list, then each ptype_base
 * hash chain.  Caller must hold rcu_read_lock().
 */
static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
	struct list_head *ptype_list = NULL;
	struct packet_type *pt = NULL;
	struct net_device *dev;
	loff_t i = 0;
	int t;

	/* Stage 1: per-device ETH_P_ALL handlers. */
	for_each_netdev_rcu(seq_file_net(seq), dev) {
		ptype_list = &dev->ptype_all;
		list_for_each_entry_rcu(pt, ptype_list, list) {
			if (i == pos)
				return pt;
			++i;
		}
	}

	/* Stage 2: global ETH_P_ALL handlers. */
	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	/* Stage 3: protocol-specific handlers, bucket by bucket. */
	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}
228
/* Position 0 is the header token; position n (n > 0) maps to the
 * (n-1)-th packet_type, so re-reads after seeks stay consistent.
 * Takes the RCU read lock, released in ptype_seq_stop().
 */
static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
235
/*
 * Step from packet_type @v to its successor in the same three-stage
 * order as ptype_get_idx(): per-device ptype_all lists, then the
 * global ptype_all list, then the ptype_base hash chains.  Which
 * stage @v belongs to is recovered from @v itself: pt->dev != NULL
 * means a per-device entry, pt->type == ETH_P_ALL means the global
 * ptype_all list, anything else is a ptype_base chain.
 * Caller must hold rcu_read_lock().
 */
static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(seq, 0);

	pt = v;
	nxt = pt->list.next;
	if (pt->dev) {
		/* Stage 1: still inside this device's list? */
		if (nxt != &pt->dev->ptype_all)
			goto found;

		/* Otherwise continue with the next device that has any
		 * per-device handlers.
		 */
		dev = pt->dev;
		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
			if (!list_empty(&dev->ptype_all)) {
				nxt = dev->ptype_all.next;
				goto found;
			}
		}

		/* No more devices: fall through to the global list. */
		nxt = ptype_all.next;
		goto ptype_all;
	}

	if (pt->type == htons(ETH_P_ALL)) {
		/* Stage 2: global ptype_all list. */
ptype_all:
		if (nxt != &ptype_all)
			goto found;
		/* Global list exhausted: start on hash bucket 0. */
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	/* Stage 3: skip over empty ptype_base buckets. */
	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}
282
/* Drop the RCU read lock taken in ptype_seq_start(). */
static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
288
/* Emit one /proc/net/ptype line: type (or "ALL"), bound device name
 * (blank if none), and the handler function.  Entries are filtered to
 * those visible in this netns, either via pt->af_packet_net or via the
 * bound device's netns.
 */
static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
		 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %ps\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}
308
/* seq_file iterator for /proc/net/ptype. */
static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};
315
dev_proc_net_init(struct net * net)316 static int __net_init dev_proc_net_init(struct net *net)
317 {
318 int rc = -ENOMEM;
319
320 if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
321 sizeof(struct seq_net_private)))
322 goto out;
323 if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
324 &softnet_seq_ops))
325 goto out_dev;
326 if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
327 sizeof(struct seq_net_private)))
328 goto out_softnet;
329
330 if (wext_proc_init(net))
331 goto out_ptype;
332 rc = 0;
333 out:
334 return rc;
335 out_ptype:
336 remove_proc_entry("ptype", net->proc_net);
337 out_softnet:
338 remove_proc_entry("softnet_stat", net->proc_net);
339 out_dev:
340 remove_proc_entry("dev", net->proc_net);
341 goto out;
342 }
343
/* Per-netns teardown: mirror of dev_proc_net_init(). */
static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	remove_proc_entry("ptype", net->proc_net);
	remove_proc_entry("softnet_stat", net->proc_net);
	remove_proc_entry("dev", net->proc_net);
}
352
/* pernet hooks for the dev/softnet_stat/ptype proc entries. */
static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};
357
/* Emit the /proc/net/dev_mcast lines for one device: one line per
 * multicast hardware address (ifindex, name, refcount, global_use,
 * address bytes in hex).  Reuses the dev_seq_* iterator, so the start
 * token prints nothing.  The address list is sampled under the device
 * address lock.
 */
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct netdev_hw_addr *ha;
	struct net_device *dev = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
			   dev->ifindex, dev->name,
			   ha->refcount, ha->global_use,
			   (int)dev->addr_len, ha->addr);
	}
	netif_addr_unlock_bh(dev);
	return 0;
}
376
/* seq_file iterator for /proc/net/dev_mcast (shares the dev walker). */
static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_mc_seq_show,
};
383
dev_mc_net_init(struct net * net)384 static int __net_init dev_mc_net_init(struct net *net)
385 {
386 if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
387 sizeof(struct seq_net_private)))
388 return -ENOMEM;
389 return 0;
390 }
391
/* Per-netns teardown for /proc/net/dev_mcast. */
static void __net_exit dev_mc_net_exit(struct net *net)
{
	remove_proc_entry("dev_mcast", net->proc_net);
}
396
/* pernet hooks for the dev_mcast proc entry. */
static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};
401
dev_proc_init(void)402 int __init dev_proc_init(void)
403 {
404 int ret = register_pernet_subsys(&dev_proc_ops);
405 if (!ret)
406 return register_pernet_subsys(&dev_mc_net_ops);
407 return ret;
408 }
409