// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/uidgid.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>
#include <net/sock.h>

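/*
 * Emit the socket's bound address (everything after sun_family, i.e. the
 * sun_path bytes) as a UNIX_DIAG_NAME attribute, if the socket is bound.
 */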
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
        /* might or might not have unix_table_lock */
        struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

        if (!addr)
                return 0;

        return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
                       addr->name->sun_path);
}

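/*
 * For a filesystem-bound socket, report the inode number and device of the
 * backing path as a UNIX_DIAG_VFS attribute.
 */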
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
        struct dentry *dentry = unix_sk(sk)->path.dentry;

        if (dentry) {
                struct unix_diag_vfs uv = {
                        .udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
                        .udiag_vfs_dev = dentry->d_sb->s_dev,
                };

                return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
        }

        return 0;
}

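/* Report the inode number of the connected peer, if any, as UNIX_DIAG_PEER. */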
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
        struct sock *peer;
        int ino;

        peer = unix_peer_get(sk);
        if (peer) {
                unix_state_lock(peer);
                ino = sock_i_ino(peer);
                unix_state_unlock(peer);
                sock_put(peer);

                return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
        }

        return 0;
}

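/*
 * For a listening socket, walk the queue of not-yet-accepted connections and
 * report the peer inode number of each one as a u32 array in a
 * UNIX_DIAG_ICONS attribute.
 */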
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
        struct sk_buff *skb;
        struct nlattr *attr;
        u32 *buf;
        int i;

        if (sk->sk_state == TCP_LISTEN) {
                spin_lock(&sk->sk_receive_queue.lock);

                attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
                                   sk->sk_receive_queue.qlen * sizeof(u32));
                if (!attr)
                        goto errout;

                buf = nla_data(attr);
                i = 0;
                skb_queue_walk(&sk->sk_receive_queue, skb) {
                        struct sock *req, *peer;

                        req = skb->sk;
                        /*
                         * The state lock is outer for the same sk's
                         * queue lock. With the other's queue locked it's
                         * OK to lock the state.
                         */
                        unix_state_lock_nested(req, U_LOCK_DIAG);
                        peer = unix_sk(req)->peer;
                        buf[i++] = (peer ? sock_i_ino(peer) : 0);
                        unix_state_unlock(req);
                }
                spin_unlock(&sk->sk_receive_queue.lock);
        }

        return 0;

errout:
        spin_unlock(&sk->sk_receive_queue.lock);
        return -EMSGSIZE;
}

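/*
 * Report queue lengths as UNIX_DIAG_RQLEN: for listeners, the number of
 * pending connections and the backlog limit; otherwise, the bytes queued in
 * the receive and send directions.
 */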
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
        struct unix_diag_rqlen rql;

        if (sk->sk_state == TCP_LISTEN) {
                rql.udiag_rqueue = sk->sk_receive_queue.qlen;
                rql.udiag_wqueue = sk->sk_max_ack_backlog;
        } else {
                rql.udiag_rqueue = (u32) unix_inq_len(sk);
                rql.udiag_wqueue = (u32) unix_outq_len(sk);
        }

        return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

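/* Report the socket owner's UID, translated into the requester's user namespace. */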
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
                            struct user_namespace *user_ns)
{
        uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}

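/*
 * Build one netlink reply message for @sk, emitting the attribute groups
 * requested in @req->udiag_show plus the shutdown state.  If any attribute
 * does not fit, the partially built message is cancelled and -EMSGSIZE is
 * returned.
 */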
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
                        struct user_namespace *user_ns,
                        u32 portid, u32 seq, u32 flags, int sk_ino)
{
        struct nlmsghdr *nlh;
        struct unix_diag_msg *rep;

        nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
                        flags);
        if (!nlh)
                return -EMSGSIZE;

        rep = nlmsg_data(nlh);
        rep->udiag_family = AF_UNIX;
        rep->udiag_type = sk->sk_type;
        rep->udiag_state = sk->sk_state;
        rep->pad = 0;
        rep->udiag_ino = sk_ino;
        sock_diag_save_cookie(sk, rep->udiag_cookie);

        if ((req->udiag_show & UDIAG_SHOW_NAME) &&
            sk_diag_dump_name(sk, skb))
                goto out_nlmsg_trim;

        if ((req->udiag_show & UDIAG_SHOW_VFS) &&
            sk_diag_dump_vfs(sk, skb))
                goto out_nlmsg_trim;

        if ((req->udiag_show & UDIAG_SHOW_PEER) &&
            sk_diag_dump_peer(sk, skb))
                goto out_nlmsg_trim;

        if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
            sk_diag_dump_icons(sk, skb))
                goto out_nlmsg_trim;

        if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
            sk_diag_show_rqlen(sk, skb))
                goto out_nlmsg_trim;

        if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
            sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
                goto out_nlmsg_trim;

        if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
                goto out_nlmsg_trim;

        if ((req->udiag_show & UDIAG_SHOW_UID) &&
            sk_diag_dump_uid(sk, skb, user_ns))
                goto out_nlmsg_trim;

        nlmsg_end(skb, nlh);
        return 0;

out_nlmsg_trim:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

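/*
 * Dump one socket, reading its inode number under the state lock and
 * skipping sockets whose inode is still zero (not yet fully set up).
 */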
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
                        struct user_namespace *user_ns,
                        u32 portid, u32 seq, u32 flags)
{
        int sk_ino;

        unix_state_lock(sk);
        sk_ino = sock_i_ino(sk);
        unix_state_unlock(sk);

        if (!sk_ino)
                return 0;

        return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
}

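/*
 * Netlink dump callback: walk every hash slot of unix_socket_table and emit
 * one message per socket matching the requested states, resuming from the
 * slot and position saved in cb->args[] across invocations.
 */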
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct unix_diag_req *req;
        int num, s_num, slot, s_slot;
        struct net *net = sock_net(skb->sk);

        req = nlmsg_data(cb->nlh);

        s_slot = cb->args[0];
        num = s_num = cb->args[1];

        spin_lock(&unix_table_lock);
        for (slot = s_slot;
             slot < ARRAY_SIZE(unix_socket_table);
             s_num = 0, slot++) {
                struct sock *sk;

                num = 0;
                sk_for_each(sk, &unix_socket_table[slot]) {
                        if (!net_eq(sock_net(sk), net))
                                continue;
                        if (num < s_num)
                                goto next;
                        if (!(req->udiag_states & (1 << sk->sk_state)))
                                goto next;
                        if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
                                         NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq,
                                         NLM_F_MULTI) < 0)
                                goto done;
next:
                        num++;
                }
        }
done:
        spin_unlock(&unix_table_lock);
        cb->args[0] = slot;
        cb->args[1] = num;

        return skb->len;
}

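/*
 * Find a unix socket by inode number and return it with a reference held,
 * or NULL if none matches.
 */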
static struct sock *unix_lookup_by_ino(unsigned int ino)
{
        int i;
        struct sock *sk;

        spin_lock(&unix_table_lock);
        for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
                sk_for_each(sk, &unix_socket_table[i])
                        if (ino == sock_i_ino(sk)) {
                                sock_hold(sk);
                                spin_unlock(&unix_table_lock);

                                return sk;
                        }
        }

        spin_unlock(&unix_table_lock);
        return NULL;
}

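/*
 * Answer a non-dump request for a single socket identified by inode number
 * (and checked against the requested cookie).  The reply buffer is grown in
 * 256-byte steps, giving up once the extra space reaches PAGE_SIZE.
 */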
static int unix_diag_get_exact(struct sk_buff *in_skb,
                               const struct nlmsghdr *nlh,
                               struct unix_diag_req *req)
{
        int err = -EINVAL;
        struct sock *sk;
        struct sk_buff *rep;
        unsigned int extra_len;
        struct net *net = sock_net(in_skb->sk);

        if (req->udiag_ino == 0)
                goto out_nosk;

        sk = unix_lookup_by_ino(req->udiag_ino);
        err = -ENOENT;
        if (sk == NULL)
                goto out_nosk;
        if (!net_eq(sock_net(sk), net))
                goto out;

        err = sock_diag_check_cookie(sk, req->udiag_cookie);
        if (err)
                goto out;

        extra_len = 256;
again:
        err = -ENOMEM;
        rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
        if (!rep)
                goto out;

        err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
                           NETLINK_CB(in_skb).portid,
                           nlh->nlmsg_seq, 0, req->udiag_ino);
        if (err < 0) {
                nlmsg_free(rep);
                extra_len += 256;
                if (extra_len >= PAGE_SIZE)
                        goto out;

                goto again;
        }
        err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
out:
        if (sk)
                sock_put(sk);
out_nosk:
        return err;
}

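/*
 * Entry point for AF_UNIX SOCK_DIAG_BY_FAMILY requests: start a netlink dump
 * for NLM_F_DUMP requests, otherwise answer the exact single-socket query.
 */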
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
        int hdrlen = sizeof(struct unix_diag_req);
        struct net *net = sock_net(skb->sk);

        if (nlmsg_len(h) < hdrlen)
                return -EINVAL;

        if (h->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
                        .dump = unix_diag_dump,
                };
                return netlink_dump_start(net->diag_nlsk, skb, h, &c);
        } else
                return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler unix_diag_handler = {
        .family = AF_UNIX,
        .dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
        return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
        sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);