// SPDX-License-Identifier: GPL-2.0
/* XDP sockets monitoring support
 *
 * Copyright(c) 2019 Intel Corporation.
 *
 * Author: Björn Töpel <bjorn.topel@intel.com>
 */

#include <linux/module.h>
#include <net/xdp_sock.h>
#include <linux/xdp_diag.h>
#include <linux/sock_diag.h>

#include "xsk_queue.h"
#include "xsk.h"

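/* Emit the XDP_DIAG_INFO attribute: the ifindex of the device the socket
 * is bound to (0 when unbound) and the queue id it is attached to.
 */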
static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xdp_diag_info di = {};

	di.ifindex = xs->dev ? xs->dev->ifindex : 0;
	di.queue_id = xs->queue_id;
	return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
}

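/* Emit a single ring attribute of @nl_type carrying the number of
 * descriptors @queue holds.
 */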
static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
			     struct sk_buff *nlskb)
{
	struct xdp_diag_ring dr = {};

	dr.entries = queue->nentries;
	return nla_put(nlskb, nl_type, sizeof(dr), &dr);
}

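/* Emit the RX and TX ring attributes for whichever rings the socket has
 * configured; a ring that was never set up is simply skipped.
 */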
static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
				  struct sk_buff *nlskb)
{
	int err = 0;

	if (xs->rx)
		err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
	if (!err && xs->tx)
		err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
	return err;
}

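/* Describe the UMEM backing the socket: id, size, page count, chunk
 * geometry, zero-copy state and reference count, plus the fill and
 * completion rings when a buffer pool has been created.
 */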
static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xsk_buff_pool *pool = xs->pool;
	struct xdp_umem *umem = xs->umem;
	struct xdp_diag_umem du = {};
	int err;

	if (!umem)
		return 0;

	du.id = umem->id;
	du.size = umem->size;
	du.num_pages = umem->npgs;
	du.chunk_size = umem->chunk_size;
	du.headroom = umem->headroom;
	du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
	du.queue_id = pool ? pool->queue_id : 0;
	du.flags = 0;
	if (umem->zc)
		du.flags |= XDP_DU_F_ZEROCOPY;
	du.refs = refcount_read(&umem->users);

	err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
	if (!err && pool && pool->fq)
		err = xsk_diag_put_ring(pool->fq,
					XDP_DIAG_UMEM_FILL_RING, nlskb);
	if (!err && pool && pool->cq)
		err = xsk_diag_put_ring(pool->cq,
					XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
	return err;
}

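/* Emit the per-socket error and backpressure counters: RX drops, invalid
 * RX/TX descriptors, and full/empty ring events.
 */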
static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xdp_diag_stats du = {};

	du.n_rx_dropped = xs->rx_dropped;
	du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
	du.n_rx_full = xs->rx_queue_full;
	du.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
	du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
	du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
	return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
}

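/* Fill one netlink message describing @sk. Attribute groups are emitted
 * only when requested via req->xdiag_show, and xs->mutex serializes
 * against concurrent reconfiguration of the socket. If the skb runs out
 * of room, the partial message is cancelled and -EMSGSIZE is returned so
 * the dump can resume in a fresh skb.
 */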
static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
			 struct xdp_diag_req *req,
			 struct user_namespace *user_ns,
			 u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_diag_msg *msg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	msg = nlmsg_data(nlh);
	memset(msg, 0, sizeof(*msg));
	msg->xdiag_family = AF_XDP;
	msg->xdiag_type = sk->sk_type;
	msg->xdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, msg->xdiag_cookie);

	mutex_lock(&xs->mutex);
	if (READ_ONCE(xs->state) == XSK_UNBOUND)
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_INFO) &&
	    nla_put_u32(nlskb, XDP_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
	    xsk_diag_put_rings_cfg(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_UMEM) &&
	    xsk_diag_put_umem(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_STATS) &&
	    xsk_diag_put_stats(xs, nlskb))
		goto out_nlmsg_trim;

	mutex_unlock(&xs->mutex);
	nlmsg_end(nlskb, nlh);
	return 0;

out_nlmsg_trim:
	mutex_unlock(&xs->mutex);
	nlmsg_cancel(nlskb, nlh);
	return -EMSGSIZE;
}

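/* Netlink dump callback: walk every XDP socket in the netns under
 * net->xdp.lock, skip the cb->args[0] sockets already dumped in earlier
 * passes, and emit one message per socket until the skb fills up.
 */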
static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
{
	struct xdp_diag_req *req = nlmsg_data(cb->nlh);
	struct net *net = sock_net(nlskb->sk);
	int num = 0, s_num = cb->args[0];
	struct sock *sk;

	mutex_lock(&net->xdp.lock);

	sk_for_each(sk, &net->xdp.list) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num++ < s_num)
			continue;

		if (xsk_diag_fill(sk, nlskb, req,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  sock_i_ino(sk)) < 0) {
			num--;
			break;
		}
	}

	mutex_unlock(&net->xdp.lock);
	cb->args[0] = num;
	return nlskb->len;
}

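/* Entry point for SOCK_DIAG_BY_FAMILY requests with sdiag_family AF_XDP;
 * only dump (NLM_F_DUMP) requests are supported.
 */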
static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
{
	struct netlink_dump_control c = { .dump = xsk_diag_dump };
	int hdrlen = sizeof(struct xdp_diag_req);
	struct net *net = sock_net(nlskb->sk);

	if (nlmsg_len(hdr) < hdrlen)
		return -EINVAL;

	if (!(hdr->nlmsg_flags & NLM_F_DUMP))
		return -EOPNOTSUPP;

	return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
}

static const struct sock_diag_handler xsk_diag_handler = {
	.owner = THIS_MODULE,
	.family = AF_XDP,
	.dump = xsk_diag_handler_dump,
};

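/* A minimal userspace sketch (not part of this module) of a request that
 * would reach xsk_diag_handler_dump(), assuming the usual sock_diag netlink
 * conventions and an fd from socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct xdp_diag_req req;
 *	} msg = {
 *		.nlh = {
 *			.nlmsg_len = sizeof(msg),
 *			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
 *			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		},
 *		.req = {
 *			.sdiag_family = AF_XDP,
 *			.xdiag_show = XDP_SHOW_INFO | XDP_SHOW_RING_CFG |
 *				      XDP_SHOW_UMEM | XDP_SHOW_STATS,
 *		},
 *	};
 *	send(fd, &msg, sizeof(msg), 0);
 */
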
static int __init xsk_diag_init(void)
{
	return sock_diag_register(&xsk_diag_handler);
}

static void __exit xsk_diag_exit(void)
{
	sock_diag_unregister(&xsk_diag_handler);
}

module_init(xsk_diag_init);
module_exit(xsk_diag_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XDP socket monitoring via SOCK_DIAG");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);