// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
#include <net/hotdata.h>

#include "dev.h"

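/* The seq_file position used for /proc/net/dev is an ifindex rather than a
 * plain counter: dev_seq_from_index() resumes the walk at the first device
 * whose ifindex is >= *pos and writes that device's ifindex back into *pos,
 * which keeps the dump stable if devices come and go between reads.
 */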
static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
{
	unsigned long ifindex = *pos;
	struct net_device *dev;

	for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
		*pos = dev->ifindex;
		return dev;
	}
	return NULL;
}

static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	return dev_seq_from_index(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_seq_from_index(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 *	Called from the PROCfs module. This uses the arbitrary-sized
 *	/proc/net seq_file interface to create /proc/net/dev.
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}
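
/* For reference, each per-device line produced above looks roughly like the
 * following (values are illustrative only):
 *
 *   eth0: 1234567    8901    0    0    0     0          0         0  7654321    4321    0    0    0     0       0          0
 */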

static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->input_pkt_queue);
}

static u32 softnet_process_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->process_queue);
}

static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;
	u32 input_qlen = softnet_input_pkt_queue_len(sd);
	u32 process_qlen = softnet_process_queue_len(sd);
	unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl)
		flow_limit_count = fl->count;
	rcu_read_unlock();
#endif

	/* The index is the CPU id owning this sd. Since offline CPUs are not
	 * displayed, it would otherwise not be trivial for user space to map
	 * each line of data to a specific CPU.
	 */
	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
		   "%08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0,	/* was cpu_collision */
		   sd->received_rps, flow_limit_count,
		   input_qlen + process_qlen, (int)seq->index,
		   input_qlen, process_qlen);
	return 0;
}
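
/* As a reading aid, each /proc/net/softnet_stat line printed above carries
 * 15 hexadecimal fields, in this order:
 *   1: processed   2: dropped   3: time_squeeze   4-9: always 0 (historic)
 *  10: received_rps   11: flow_limit_count   12: input_qlen + process_qlen
 *  13: CPU id (seq->index)   14: input_qlen   15: process_qlen
 */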

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
	struct list_head *ptype_list = NULL;
	struct packet_type *pt = NULL;
	struct net_device *dev;
	loff_t i = 0;
	int t;

	for_each_netdev_rcu(seq_file_net(seq), dev) {
		ptype_list = &dev->ptype_all;
		list_for_each_entry_rcu(pt, ptype_list, list) {
			if (i == pos)
				return pt;
			++i;
		}
	}

	list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(seq, 0);

	pt = v;
	nxt = pt->list.next;
	if (pt->dev) {
		if (nxt != &pt->dev->ptype_all)
			goto found;

		dev = pt->dev;
		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
			if (!list_empty(&dev->ptype_all)) {
				nxt = dev->ptype_all.next;
				goto found;
			}
		}

		nxt = net_hotdata.ptype_all.next;
		goto ptype_all;
	}

	if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
		if (nxt != &net_hotdata.ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
		 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %ps\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}
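
/* For reference, /proc/net/ptype output rendered by the code above looks
 * roughly like this (device and handler names are illustrative only):
 *
 *	Type Device      Function
 *	ALL  eth0        tpacket_rcv
 *	0800            ip_rcv
 *	86dd            ipv6_rcv
 */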

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
			sizeof(struct seq_net_private)))
		goto out;
	if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
			 &softnet_seq_ops))
		goto out_dev;
	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
			sizeof(struct seq_net_private)))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	remove_proc_entry("ptype", net->proc_net);
out_softnet:
	remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
	remove_proc_entry("dev", net->proc_net);
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	remove_proc_entry("ptype", net->proc_net);
	remove_proc_entry("softnet_stat", net->proc_net);
	remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct netdev_hw_addr *ha;
	struct net_device *dev = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
			   dev->ifindex, dev->name,
			   ha->refcount, ha->global_use,
			   (int)dev->addr_len, ha->addr);
	}
	netif_addr_unlock_bh(dev);
	return 0;
}
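
/* Each /proc/net/dev_mcast line emitted above carries the ifindex, device
 * name, reference count, global use flag and the hardware multicast address
 * in hex, e.g. (values are illustrative only):
 *
 *	2    eth0            1     0     01005e000001
 */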

static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
	if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
	return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
	remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};

int __init dev_proc_init(void)
{
	int ret = register_pernet_subsys(&dev_proc_ops);

	if (!ret)
		return register_pernet_subsys(&dev_mc_net_ops);
	return ret;
}