1#include <linux/module.h>
2#include <linux/inet_diag.h>
3#include <linux/sock_diag.h>
4#include <net/sctp/sctp.h>
5
/* Forward declaration: sctp_diag_get_info() is defined near the bottom of
 * this file but is called from inet_sctp_diag_fill() below (it is also
 * installed as the handler's ->idiag_get_info callback).
 */
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info);
8
9
10static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
11 struct sock *sk,
12 struct sctp_association *asoc)
13{
14 union sctp_addr laddr, paddr;
15 struct dst_entry *dst;
16 struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;
17
18 laddr = list_entry(asoc->base.bind_addr.address_list.next,
19 struct sctp_sockaddr_entry, list)->a;
20 paddr = asoc->peer.primary_path->ipaddr;
21 dst = asoc->peer.primary_path->dst;
22
23 r->idiag_family = sk->sk_family;
24 r->id.idiag_sport = htons(asoc->base.bind_addr.port);
25 r->id.idiag_dport = htons(asoc->peer.port);
26 r->id.idiag_if = dst ? dst->dev->ifindex : 0;
27 sock_diag_save_cookie(sk, r->id.idiag_cookie);
28
29#if IS_ENABLED(CONFIG_IPV6)
30 if (sk->sk_family == AF_INET6) {
31 *(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
32 *(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
33 } else
34#endif
35 {
36 memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
37 memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
38
39 r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
40 r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
41 }
42
43 r->idiag_state = asoc->state;
44 if (timer_pending(t3_rtx)) {
45 r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
46 r->idiag_retrans = asoc->rtx_data_chunks;
47 r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
48 } else {
49 r->idiag_timer = 0;
50 r->idiag_retrans = 0;
51 r->idiag_expires = 0;
52 }
53}
54
/* Dump the bound local addresses on @address_list into a single
 * INET_DIAG_LOCALS attribute, one sockaddr_storage-sized slot per
 * address.  Returns 0 on success or -EMSGSIZE if the skb has no room.
 */
static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
					 struct list_head *address_list)
{
	struct sctp_sockaddr_entry *laddr;
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct nlattr *attr;
	void *info = NULL;

	/* First pass: count entries to size the attribute reservation.
	 * NOTE(review): the list is walked twice with RCU iterators; this
	 * assumes the list cannot grow between the passes (presumably the
	 * caller holds the socket lock) -- verify against callers.
	 */
	list_for_each_entry_rcu(laddr, address_list, list)
		addrcnt++;

	attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
	if (!attr)
		return -EMSGSIZE;

	/* Second pass: copy each address and zero-pad its slot so no
	 * uninitialized kernel bytes leak to userspace.
	 */
	info = nla_data(attr);
	list_for_each_entry_rcu(laddr, address_list, list) {
		memcpy(info, &laddr->a, sizeof(laddr->a));
		memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
		info += addrlen;
	}

	return 0;
}
80
/* Dump the association's remote transport addresses into a single
 * INET_DIAG_PEERS attribute, one sockaddr_storage-sized slot per
 * transport.  Returns 0 on success or -EMSGSIZE if the skb has no room.
 */
static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
					struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	struct sctp_transport *from;
	struct nlattr *attr;
	void *info = NULL;

	/* peer.transport_count is taken as the length of the transport
	 * list walked below.
	 */
	attr = nla_reserve(skb, INET_DIAG_PEERS,
			   addrlen * asoc->peer.transport_count);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		/* Copy the address and zero-pad the rest of the slot so no
		 * kernel memory leaks to userspace.
		 */
		memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
		memset(info + sizeof(from->ipaddr), 0,
		       addrlen - sizeof(from->ipaddr));
		info += addrlen;
	}

	return 0;
}
105
106
/* Build one complete inet_diag netlink record for @sk.  When @asoc is
 * non-NULL the record describes that single association; when NULL it
 * describes the endpoint (listening socket) itself.  Returns 0 on
 * success or -EMSGSIZE when the skb ran out of room (the partial
 * message is cancelled).
 */
static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
			       struct sk_buff *skb,
			       const struct inet_diag_req_v2 *req,
			       struct user_namespace *user_ns,
			       int portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh,
			       bool net_admin)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct list_head *addr_list;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	int ext = req->idiag_ext;
	struct sctp_infox infox;
	void *info = NULL;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	if (asoc) {
		inet_diag_msg_sctpasoc_fill(r, sk, asoc);
	} else {
		/* Endpoint-level record: generic socket fields only. */
		inet_diag_msg_common_fill(r, sk);
		r->idiag_state = sk->sk_state;
		r->idiag_timer = 0;
		r->idiag_retrans = 0;
	}

	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
		goto errout;

	/* Socket memory accounting: when per-association buffer policies
	 * are enabled, report the association's own usage instead of the
	 * socket-wide counters.
	 */
	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
		u32 mem[SK_MEMINFO_VARS];
		int amt;

		if (asoc && asoc->ep->sndbuf_policy)
			amt = asoc->sndbuf_used;
		else
			amt = sk_wmem_alloc_get(sk);
		mem[SK_MEMINFO_WMEM_ALLOC] = amt;
		if (asoc && asoc->ep->rcvbuf_policy)
			amt = atomic_read(&asoc->rmem_alloc);
		else
			amt = sk_rmem_alloc_get(sk);
		mem[SK_MEMINFO_RMEM_ALLOC] = amt;
		mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
		mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
		mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
		mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
		mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
		mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
		mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);

		if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
			goto errout;
	}

	/* Reserve INET_DIAG_INFO space if requested; sctp_diag_get_info()
	 * below fills it (and the rqueue/wqueue fields) via infox.
	 */
	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		struct nlattr *attr;

		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
					 sizeof(struct sctp_info),
					 INET_DIAG_PAD);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}
	infox.sctpinfo = (struct sctp_info *)info;
	infox.asoc = asoc;
	sctp_diag_get_info(sk, r, &infox);

	addr_list = asoc ? &asoc->base.bind_addr.address_list
			 : &ep->base.bind_addr.address_list;
	if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
		goto errout;

	/* A fixed congestion-control name is reported for associations. */
	if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
		if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
			goto errout;

	if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
		goto errout;

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
203
204
/* Parameters threaded through the dump/lookup callbacks below. */
struct sctp_comm_param {
	struct sk_buff *skb;		/* reply skb (dump) or request skb (dump_one) */
	struct netlink_callback *cb;	/* netlink dump state; dump path only */
	const struct inet_diag_req_v2 *r;	/* the userspace request */
	const struct nlmsghdr *nlh;	/* request header; dump_one path only */
	bool net_admin;			/* requester has CAP_NET_ADMIN */
};
212
/* Upper bound on the netlink attribute space needed to dump a single
 * association, used to size the reply skb in sctp_tsp_dump_one().
 */
static size_t inet_assoc_attr_size(struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct sctp_sockaddr_entry *laddr;

	list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
				list)
		addrcnt++;

	return nla_total_size(sizeof(struct sctp_info))	/* INET_DIAG_INFO */
		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
		+ nla_total_size(1) /* INET_DIAG_TOS */
		+ nla_total_size(1) /* INET_DIAG_TCLASS */
		+ nla_total_size(4) /* INET_DIAG_MARK */
		+ nla_total_size(addrlen * asoc->peer.transport_count) /* INET_DIAG_PEERS */
		+ nla_total_size(addrlen * addrcnt) /* INET_DIAG_LOCALS */
		+ nla_total_size(sizeof(struct inet_diag_meminfo)) /* INET_DIAG_SKMEMINFO */
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ 64; /* slack for the header and small attrs */
}
234
/* sctp_transport_lookup_process() callback: answer an exact-lookup
 * (dump-one) request for the association owning @tsp by unicasting a
 * freshly allocated reply skb back to the requester.
 */
static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
{
	struct sctp_association *assoc = tsp->asoc;
	struct sock *sk = tsp->asoc->base.sk;
	struct sctp_comm_param *commp = p;
	struct sk_buff *in_skb = commp->skb;
	const struct inet_diag_req_v2 *req = commp->r;
	const struct nlmsghdr *nlh = commp->nlh;
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	int err;

	/* Make sure the requester's cookie matches this socket. */
	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
	if (!rep)
		goto out;

	/* The association may migrate to a different socket while we wait
	 * for the lock (e.g. peeloff); re-check the owner after locking
	 * and switch to the current one if it changed.
	 */
	lock_sock(sk);
	if (sk != assoc->base.sk) {
		release_sock(sk);
		sk = assoc->base.sk;
		lock_sock(sk);
	}
	err = inet_sctp_diag_fill(sk, assoc, rep, req,
				  sk_user_ns(NETLINK_CB(in_skb).sk),
				  NETLINK_CB(in_skb).portid,
				  nlh->nlmsg_seq, 0, nlh,
				  commp->net_admin);
	release_sock(sk);
	if (err < 0) {
		/* rep was sized by inet_assoc_attr_size(), so -EMSGSIZE
		 * here would indicate a sizing bug.
		 */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}

	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	return err;
}
281
/* Dump every association on the endpoint owning @tsp into the netlink
 * dump skb.  cb->args[] carry resume state across dump invocations:
 *   args[1] - number of associations already dumped for this socket
 *             (skip that many on resume)
 *   args[3] - set once the endpoint-level (asoc == NULL) record has
 *             been emitted for this socket
 *   args[4] - associations visited so far in the current pass
 * Returns non-zero to stop the surrounding transport walk when the skb
 * runs out of room.
 */
static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
{
	struct sctp_endpoint *ep = tsp->asoc->ep;
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc;
	int err = 0;

	lock_sock(sk);
	list_for_each_entry(assoc, &ep->asocs, asocs) {
		/* Skip associations dumped in a previous invocation. */
		if (cb->args[4] < cb->args[1])
			goto next;

		/* Apply the request's source/destination port filters;
		 * a zero port in the request matches anything.
		 */
		if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
		    r->id.idiag_sport)
			goto next;
		if (r->id.idiag_dport != htons(assoc->peer.port) &&
		    r->id.idiag_dport)
			goto next;

		/* Emit the endpoint-level record once, before the first
		 * association record for this socket.
		 */
		if (!cb->args[3] &&
		    inet_sctp_diag_fill(sk, NULL, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq,
					NLM_F_MULTI, cb->nlh,
					commp->net_admin) < 0) {
			err = 1;
			goto release;
		}
		cb->args[3] = 1;

		if (inet_sctp_diag_fill(sk, assoc, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, 0, cb->nlh,
					commp->net_admin) < 0) {
			err = 1;
			goto release;
		}
next:
		cb->args[4]++;
	}
	/* Whole endpoint dumped: clear the per-socket resume state. */
	cb->args[1] = 0;
	cb->args[3] = 0;
	cb->args[4] = 0;
release:
	release_sock(sk);
	return err;
}
335
336static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
337{
338 struct sctp_endpoint *ep = tsp->asoc->ep;
339 struct sctp_comm_param *commp = p;
340 struct sock *sk = ep->base.sk;
341 const struct inet_diag_req_v2 *r = commp->r;
342 struct sctp_association *assoc =
343 list_entry(ep->asocs.next, struct sctp_association, asocs);
344
345
346 if (tsp->asoc != assoc)
347 return 0;
348
349 if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
350 return 0;
351
352 return 1;
353}
354
/* sctp_for_each_endpoint() callback: emit one endpoint-level record if
 * the endpoint passes the request's state/family/port filters.
 * cb->args[1]/args[4] implement resume-skipping just like in
 * sctp_sock_dump().  Returns 2 to abort the walk when the skb is full.
 */
static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct net *net = sock_net(skb->sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;

	/* Only dump sockets belonging to the requester's netns. */
	if (!net_eq(sock_net(sk), net))
		goto out;

	/* Skip endpoints already dumped in a previous invocation. */
	if (cb->args[4] < cb->args[1])
		goto next;

	/* Skip endpoints that have associations unless the LISTEN state
	 * was requested.
	 */
	if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
		goto next;

	if (r->sdiag_family != AF_UNSPEC &&
	    sk->sk_family != r->sdiag_family)
		goto next;

	/* Port filters; a zero port in the request matches anything. */
	if (r->id.idiag_sport != inet->inet_sport &&
	    r->id.idiag_sport)
		goto next;

	if (r->id.idiag_dport != inet->inet_dport &&
	    r->id.idiag_dport)
		goto next;

	if (inet_sctp_diag_fill(sk, NULL, skb, r,
				sk_user_ns(NETLINK_CB(cb->skb).sk),
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				cb->nlh, commp->net_admin) < 0) {
		err = 2;
		goto out;
	}
next:
	cb->args[4]++;
out:
	return err;
}
400
401
402static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
403 void *info)
404{
405 struct sctp_infox *infox = (struct sctp_infox *)info;
406
407 if (infox->asoc) {
408 r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
409 r->idiag_wqueue = infox->asoc->sndbuf_used;
410 } else {
411 r->idiag_rqueue = sk->sk_ack_backlog;
412 r->idiag_wqueue = sk->sk_max_ack_backlog;
413 }
414 if (infox->sctpinfo)
415 sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
416}
417
418static int sctp_diag_dump_one(struct sk_buff *in_skb,
419 const struct nlmsghdr *nlh,
420 const struct inet_diag_req_v2 *req)
421{
422 struct net *net = sock_net(in_skb->sk);
423 union sctp_addr laddr, paddr;
424 struct sctp_comm_param commp = {
425 .skb = in_skb,
426 .r = req,
427 .nlh = nlh,
428 .net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
429 };
430
431 if (req->sdiag_family == AF_INET) {
432 laddr.v4.sin_port = req->id.idiag_sport;
433 laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
434 laddr.v4.sin_family = AF_INET;
435
436 paddr.v4.sin_port = req->id.idiag_dport;
437 paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
438 paddr.v4.sin_family = AF_INET;
439 } else {
440 laddr.v6.sin6_port = req->id.idiag_sport;
441 memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
442 sizeof(laddr.v6.sin6_addr));
443 laddr.v6.sin6_family = AF_INET6;
444
445 paddr.v6.sin6_port = req->id.idiag_dport;
446 memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
447 sizeof(paddr.v6.sin6_addr));
448 paddr.v6.sin6_family = AF_INET6;
449 }
450
451 return sctp_transport_lookup_process(sctp_tsp_dump_one,
452 net, &laddr, &paddr, &commp);
453}
454
/* Handle a full SOCK_DIAG dump request in two phases:
 *   phase 0 (cb->args[0] == 0): walk listening endpoints, when the
 *     request's state filter includes TCPF_LISTEN;
 *   phase 1: walk transports to dump established associations.
 * cb->args[2] holds the transport-walk position; args[1]/args[4] hold
 * the per-socket association resume point (see sctp_sock_dump()).
 */
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			   const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	u32 idiag_states = r->idiag_states;
	struct net *net = sock_net(skb->sk);
	struct sctp_comm_param commp = {
		.skb = skb,
		.cb = cb,
		.r = r,
		.net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
	};
	int pos = cb->args[2];

	/* Phase 0: endpoints.  A non-zero return from the walk means the
	 * skb filled up; bail out and resume on the next invocation.
	 */
	if (cb->args[0] == 0) {
		if (!(idiag_states & TCPF_LISTEN))
			goto skip;
		if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
			goto done;
skip:
		/* Endpoint phase finished: advance to phase 1 and reset
		 * the resume counters.
		 */
		cb->args[0] = 1;
		cb->args[1] = 0;
		cb->args[4] = 0;
	}

	/* Phase 1 only applies when states beyond LISTEN/CLOSE were
	 * requested.
	 */
	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
		goto done;

	sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
				net, &pos, &commp);
	cb->args[2] = pos;

done:
	/* Remember how many records the current socket already produced
	 * so an interrupted dump can be resumed there.
	 */
	cb->args[1] = cb->args[4];
	cb->args[4] = 0;
}
504
/* inet_diag dispatch entry registered for IPPROTO_SCTP. */
static const struct inet_diag_handler sctp_diag_handler = {
	.dump = sctp_diag_dump,
	.dump_one = sctp_diag_dump_one,
	.idiag_get_info = sctp_diag_get_info,
	.idiag_type = IPPROTO_SCTP,
	.idiag_info_size = sizeof(struct sctp_info),
};
512
/* Module init: register the SCTP handler with the inet_diag core. */
static int __init sctp_diag_init(void)
{
	return inet_diag_register(&sctp_diag_handler);
}
517
/* Module exit: unregister the handler. */
static void __exit sctp_diag_exit(void)
{
	inet_diag_unregister(&sctp_diag_handler);
}
522
module_init(sctp_diag_init);
module_exit(sctp_diag_exit);
MODULE_LICENSE("GPL");
/* 2-132 == AF_INET(2) / IPPROTO_SCTP(132): lets sock_diag requests for
 * SCTP auto-load this module.
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);
527